// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

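	/*
	 * Heap BOs get their backing pages on GPU page fault, in 2MB
	 * chunks with one sg_table each, so there are size / SZ_2M
	 * tables to unmap and free here. Non-heap BOs never set
	 * bo->sgts.
	 */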
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

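/*
 * Look up the mapping of @bo in @priv's address space and take a reference
 * on it; returns NULL when the BO is not mapped for this file. The caller
 * must balance with panfrost_gem_mapping_put(). A sketch of a typical
 * consumer (the job submission path works along these lines):
 *
 *	mapping = panfrost_gem_mapping_get(bo, priv);
 *	if (!mapping)
 *		return -EINVAL;
 *	gpu_va = mapping->mmnode.start << PAGE_SHIFT;
 *	...
 *	panfrost_gem_mapping_put(mapping);
 */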
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

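/*
 * Tear down the GPU side of a mapping: unmap it from the MMU when still
 * active and return its VA range to the allocator. Idempotent, so a
 * mapping already torn down via panfrost_gem_teardown_mappings_locked()
 * is handled cleanly when panfrost_gem_mapping_release() runs later.
 */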
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

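/* Caller must hold bo->mappings.lock, hence the _locked suffix. */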
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

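/*
 * ->open() is called by the DRM core each time a GEM handle to the BO is
 * created for a file, so this is where the per-file GPU VA mapping gets
 * allocated.
 */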
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

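	/*
	 * The success path falls through here with ret == 0, so the
	 * reference is only dropped when one of the steps above failed.
	 */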
err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

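/*
 * Heap BOs are faulted in on demand and never have all their pages
 * allocated up front, so pinning them (e.g. for dma-buf export) is
 * refused.
 */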
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin_locked(&bo->base);
}

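/*
 * Report residency and purgeability so the DRM core can account this BO
 * in the per-file fdinfo memory stats.
 */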
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	enum drm_gem_object_status res = 0;

	if (bo->base.base.import_attach || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (bo->base.madv == PANFROST_MADV_DONTNEED)
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}

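/*
 * The resident set of a heap BO is only the part that has been faulted
 * in, which bo->heap_rss_size tracks; fully backed BOs just report their
 * full size.
 */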
static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap) {
		return bo->heap_rss_size;
	} else if (bo->base.pages) {
		WARN_ON(bo->heap_rss_size);
		return bo->base.base.size;
	}

	return 0;
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panfrost_gem_status,
	.rss = panfrost_gem_rss,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;

	return &obj->base.base;
}

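/*
 * Create a shmem-backed BO. @flags takes the PANFROST_BO_* uAPI flags:
 * NOEXEC marks the BO non-executable (skipping the 16MB alignment rules
 * applied in panfrost_gem_open()), and HEAP makes it a grow-on-GPU-fault
 * heap.
 */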
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}

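/* Buffers imported through PRIME are always mapped non-executable. */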
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}