1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#ifndef __MSM_GEM_H__
8#define __MSM_GEM_H__
9
10#include <linux/kref.h>
11#include <linux/dma-resv.h>
12#include "msm_drv.h"
13
14/* Additional internal-use only BO flags: */
15#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
16
/* An address space (iova range) that GEM objects can be mapped into */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref; /* refcount on the address space itself */
};
27
/* One mapping of a GEM object into an address space */
struct msm_gem_vma {
	struct drm_mm_node node; /* allocation in aspace's drm_mm */
	uint64_t iova;           /* device address of this mapping */
	struct msm_gem_address_space *aspace;
	struct list_head list;   /* node in msm_gem_object::vmas */
	bool mapped;             /* presumably set once iova is mapped in the mmu — confirm */
	int inuse;               /* NOTE(review): looks like a pin/use count — confirm semantics */
};
36
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;		/* MSM_BO_x flags, incl. internal MSM_BO_STOLEN */

	/**
	 * Advice: are the backing pages purgeable?
	 * (compared against MSM_MADV_DONTNEED/MSM_MADV_WILLNEED elsewhere
	 * in this file)
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d on devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu; /* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;	/* backing pages, when attached */
	struct sg_table *sgt;	/* scatter/gather table for dma */
	void *vaddr;		/* kernel virtual mapping (see vmap_count) */

	struct list_head vmas; /* list of msm_gem_vma */

	struct llist_node freed; /* presumably node for deferred free (msm_gem_free_work) — confirm */

	/* For physically contiguous buffers. Used when we don't have
	 * an IOMMU. Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
	struct mutex lock; /* Protects resources associated with bo */

	char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
87
88static inline bool is_active(struct msm_gem_object *msm_obj)
89{
90 return msm_obj->gpu != NULL;
91}
92
93static inline bool is_purgeable(struct msm_gem_object *msm_obj)
94{
95 WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
96 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
97 !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
98}
99
100static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
101{
102 return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
103}
104
/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it. Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgeable object. This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,
	OBJ_LOCK_SHRINKER,
};
120
/* Purge a bo's backing pages; @subclass selects the lockdep subclass
 * (see enum msm_gem_lock above) since this can run from the shrinker.
 */
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Tear down the kernel vmap of a bo, if any (see is_vunmapable()) */
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Worker for deferred freeing of bo's — presumably drains the
 * msm_gem_object::freed llist; confirm against msm_gem.c
 */
void msm_gem_free_work(struct work_struct *work);
124
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc). This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node; /* node in ring submit list */
	struct list_head bo_list; /* objects in this submit (via submit_entry) */
	struct ww_acquire_ctx ticket;
	uint32_t seqno; /* Sequence number of the submit on the ring */
	struct dma_fence *fence;
	struct msm_gpu_submitqueue *queue;
	struct pid *pid; /* submitting process */
	bool valid; /* true if no cmdstream patching needed */
	bool in_rb; /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds; /* number of valid entries in cmd[] */
	unsigned int nr_bos;  /* number of valid entries in bos[] */
	u32 ident; /* An "identifier" for the submit for logging */
	struct {
		uint32_t type;
		uint32_t size; /* in dwords */
		uint64_t iova;
		uint32_t idx; /* cmdstream buffer idx in bos[] */
	} *cmd; /* array of size nr_cmds */
	struct {
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
	} bos[0]; /* variable-length tail, allocated past end of struct */
};
162
163#endif /* __MSM_GEM_H__ */
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__
20
21#include <linux/reservation.h>
22#include "msm_drv.h"
23
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags; /* MSM_BO_x creation flags */

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d on devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu; /* non-null if active */
	uint32_t read_fence, write_fence; /* last fences to read/write this bo */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;   /* backing pages, when attached */
	struct sg_table *sgt;  /* scatter/gather table for dma */
	void *vaddr;           /* kernel virtual mapping, if any */

	struct {
		// XXX
		uint32_t iova; /* device address in this domain */
	} domain[NUM_DOMAINS];

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* For physically contiguous buffers. Used when we don't have
	 * an IOMMU.
	 */
	struct drm_mm_node *vram_node;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
67
68static inline bool is_active(struct msm_gem_object *msm_obj)
69{
70 return msm_obj->gpu != NULL;
71}
72
73#define MAX_CMDS 4
74
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc). This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct list_head bo_list; /* objects in this submit (via submit_entry) */
	struct ww_acquire_ctx ticket;
	uint32_t fence; /* fence value assigned to this submit */
	bool valid; /* presumably: no cmdstream patching needed — confirm */
	unsigned int nr_cmds; /* number of valid entries in cmd[] */
	unsigned int nr_bos;  /* number of valid entries in bos[] */
	struct {
		uint32_t type;
		uint32_t size; /* in dwords */
		uint32_t iova;
	} cmd[MAX_CMDS];
	struct {
		uint32_t flags;
		struct msm_gem_object *obj;
		uint32_t iova;
	} bos[0]; /* variable-length tail, allocated past end of struct */
};
100
101#endif /* __MSM_GEM_H__ */