// SPDX-License-Identifier: GPL-2.0-only
/*
 * psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 * - we need to work out if the MMU is relevant (e.g. for
 *   accelerated operations on a GEM object)
 */

#include <linux/pagemap.h>

#include <asm/set_memory.h>

#include <drm/drm.h>
#include <drm/drm_vma_manager.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * PSB GEM object
 */

/*
 * Pin the object's backing pages into the GART and the GPU MMU. The pin is
 * counted in pobj->in_gart, so only the first pin maps the pages; stolen
 * memory objects are permanently resident and need no mapping.
 */
int psb_gem_pin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	struct page **pages;
	unsigned int npages;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return ret;

	if (pobj->in_gart || pobj->stolen)
		goto out; /* already mapped */

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto err_dma_resv_unlock;
	}

	npages = obj->size / PAGE_SIZE;

	set_pages_array_wc(pages, npages);

	psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
	psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
			     (gpu_base + pobj->offset), npages, 0, 0,
			     PSB_MMU_CACHED_MEMORY);

	pobj->pages = pages;

out:
	++pobj->in_gart;
	dma_resv_unlock(obj->resv);

	return 0;

err_dma_resv_unlock:
	dma_resv_unlock(obj->resv);
	return ret;
}

/*
 * Drop one pin reference. When the last pin goes away the pages are removed
 * from the GART and MMU and their caching mode is restored.
 */
void psb_gem_unpin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	unsigned long npages;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return;

	WARN_ON(!pobj->in_gart);

	--pobj->in_gart;

	if (pobj->in_gart || pobj->stolen)
		goto out;

	npages = obj->size / PAGE_SIZE;

	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     (gpu_base + pobj->offset), npages, 0, 0);
	psb_gtt_remove_pages(dev_priv, &pobj->resource);

	/* Reset caching flags */
	set_pages_array_wb(pobj->pages, npages);

	drm_gem_put_pages(obj, pobj->pages, true, false);
	pobj->pages = NULL;

out:
	dma_resv_unlock(obj->resv);
}
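
/*
 * Usage sketch (illustrative only, not part of the driver): code that needs
 * a buffer resident in the GART brackets the access with a pin/unpin pair.
 * Only the first pin populates the GART and MMU, and only the last unpin
 * tears the mapping down, so nesting is safe:
 *
 *	ret = psb_gem_pin(pobj);
 *	if (ret)
 *		return ret;
 *	... program the hardware with pobj->offset while pinned ...
 *	psb_gem_unpin(pobj);
 */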

static vm_fault_t psb_gem_fault(struct vm_fault *vmf);

static void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct psb_gem_object *pobj = to_psb_gem_object(obj);

	/* Undo the mmap pin if we are destroying the object */
	if (pobj->mmapping)
		psb_gem_unpin(pobj);

	drm_gem_object_release(obj);

	WARN_ON(pobj->in_gart && !pobj->stolen);

	release_resource(&pobj->resource);
	kfree(pobj);
}

static const struct vm_operations_struct psb_gem_vm_ops = {
	.fault = psb_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs psb_gem_object_funcs = {
	.free = psb_gem_free_object,
	.vm_ops = &psb_gem_vm_ops,
};

struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	int ret;

	size = roundup(size, PAGE_SIZE);

	pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
	if (!pobj)
		return ERR_PTR(-ENOMEM);
	obj = &pobj->base;

	/* GTT resource */

	ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
					&pobj->offset);
	if (ret)
		goto err_kfree;

	if (stolen) {
		pobj->stolen = true;
		pobj->in_gart = 1;
	}

	/* GEM object */

	obj->funcs = &psb_gem_object_funcs;

	if (stolen) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_release_resource;

		/* Limit the object to 32-bit mappings */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	}

	return pobj;

err_release_resource:
	release_resource(&pobj->resource);
err_kfree:
	kfree(pobj);
	return ERR_PTR(ret);
}
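
/*
 * Usage sketch (illustrative only): a stolen-memory backed object, e.g. for
 * a framebuffer, can be created like this. The "fb" resource name is just an
 * example label; errors follow the ERR_PTR() convention used above.
 *
 *	pobj = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
 *	if (IS_ERR(pobj))
 *		return PTR_ERR(pobj);
 *	pobj->offset is now a valid offset into the stolen area
 */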

/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	pitch = ALIGN(pitch, 64);

	size = pitch * args->height;
	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
	if (IS_ERR(pobj))
		return PTR_ERR(pobj);
	obj = &pobj->base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret)
		goto err_drm_gem_object_put;

	drm_gem_object_put(obj);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
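
/*
 * Worked example (illustrative only): a 1024x768 dumb buffer at 32 bpp gives
 * DIV_ROUND_UP(32, 8) = 4 bytes per pixel, so the pitch is 1024 * 4 = 4096
 * bytes (already 64-byte aligned) and the size is 4096 * 768 = 3145728
 * bytes, which is a whole number of pages.
 */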

/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking it when we run out of space. We can put
 * that off for now; our simple uses do not require it.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct psb_gem_object *pobj;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data; /* GEM object */
	dev = obj->dev;
	dev_priv = to_drm_psb_private(dev);

	pobj = to_psb_gem_object(obj);

	/* Make sure we don't get a parallel update on a fault, nor move or
	   remove something from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (pobj->mmapping == 0) {
		err = psb_gem_pin(pobj);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			ret = vmf_error(err);
			goto fail;
		}
		pobj->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (pobj->stolen)
		pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(pobj->pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}
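
/*
 * Sketch (illustrative only) of how userspace reaches this handler: it maps
 * the object through the fake offset handed out by the DRM VMA manager,
 * which is why the handler above recomputes the page index from the
 * faulting address rather than from vmf->pgoff.
 *
 *	off = drm_vma_node_offset_addr(&obj->vma_node);    (kernel side)
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *	           MAP_SHARED, drm_fd, off);               (userspace side)
 */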

/*
 * Memory management
 */

/* Insert vram stolen pages into the GTT. */
static void psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
	struct drm_device *dev = &pdev->dev;
	unsigned int pfn_base;
	unsigned int i, num_pages;
	uint32_t pte;

	pfn_base = pdev->stolen_base >> PAGE_SHIFT;
	num_pages = pdev->vram_stolen_size >> PAGE_SHIFT;

	drm_dbg(dev, "Set up %u stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);

	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, pdev->gtt_map + i);
	}

	/* Read back the last entry to flush the posted PTE writes */
	(void)ioread32(pdev->gtt_map + i - 1);
}

int psb_gem_mm_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;
	int ret;

	mutex_init(&dev_priv->mmap_mutex);

	pg = &dev_priv->gtt;

	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto err_mutex_destroy;
	}

	psb_gem_mm_populate_stolen(dev_priv);

	return 0;

err_mutex_destroy:
	mutex_destroy(&dev_priv->mmap_mutex);
	return ret;
}
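
/*
 * Usage sketch (illustrative only): the driver's probe and teardown paths
 * are expected to pair these calls, e.g.
 *
 *	ret = psb_gem_mm_init(dev);
 *	if (ret)
 *		goto err;
 *	...
 *	psb_gem_mm_fini(dev);
 */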

void psb_gem_mm_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	iounmap(dev_priv->vram_addr);

	mutex_destroy(&dev_priv->mmap_mutex);
}

/* Re-insert all pinned GEM objects into GTT. */
static void psb_gem_mm_populate_resources(struct drm_psb_private *pdev)
{
	unsigned int restored = 0, total = 0, size = 0;
	struct resource *r = pdev->gtt_mem->child;
	struct drm_device *dev = &pdev->dev;
	struct psb_gem_object *pobj;

	while (r) {
		/*
		 * TODO: GTT restoration needs a refactoring, so that we don't have to touch
		 * struct psb_gem_object here. The type represents a GEM object and is
		 * not related to the GTT itself.
		 */
		pobj = container_of(r, struct psb_gem_object, resource);
		if (pobj->pages) {
			psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
			size += resource_size(&pobj->resource);
			++restored;
		}
		r = r->sibling;
		++total;
	}

	drm_dbg(dev, "Restored %u of %u gtt ranges (%u KB)\n", restored, total, (size / 1024));
}

int psb_gem_mm_resume(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;

	pg = &dev_priv->gtt;

	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n", dev_priv->stolen_base,
		vram_stolen_size / 1024);

	if (stolen_size != pg->stolen_size) {
		dev_err(dev->dev, "GTT resume error.\n");
		return -EINVAL;
	}

	psb_gem_mm_populate_stolen(dev_priv);
	psb_gem_mm_populate_resources(dev_priv);

	return 0;
}
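
/*
 * Usage sketch (illustrative only): the power-management resume path calls
 * this after the device has lost its GTT content over suspend, e.g.
 *
 *	ret = psb_gem_mm_resume(dev);
 *	if (ret)
 *		return ret;
 */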