/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
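/*
 * Illustration (hypothetical addresses): a register BAR living at bus
 * address 0x1fd000000 on a 64-bit machine will still match a duplicate
 * addmap request that can only express the low 32 bits (0xfd000000),
 * because _DRM_REGISTERS/_DRM_FRAME_BUFFER maps are compared on
 * (offset & 0xffffffff) above.
 */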

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
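		/*
		 * Worked example (hypothetical platform): with SHMLBA = 16K
		 * and PAGE_SIZE = 4K, bits = ilog2(4) + 1 = 3, so the low
		 * three bits of (user_token >> PAGE_SHIFT) are folded into
		 * 'add' below and held fixed (shift = 3) while the hash
		 * table probes for a free slot.
		 */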
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no
	 * security hole created by that and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device *dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
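/*
 * Example (sketch, not part of this file): a PCI driver would typically
 * map its MMIO registers at load time along the lines of
 *
 *	struct drm_local_map *map;
 *	int ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			     pci_resource_len(dev->pdev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *
 * where the BAR index and the _DRM_READ_ONLY flag are illustrative.
 */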

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the
	 * API is already set, so it's too late to change. */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it's being used, and frees any associated resources (such as
 * MTRRs) if it's not in use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
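/*
 * Sketch of a client-side request (hypothetical values): the ioctl caller
 * fills a struct drm_buf_desc such as
 *
 *	struct drm_buf_desc req = {
 *		.count = 32,
 *		.size = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *
 * and drm_addbufs() routes it to drm_addbufs_agp() because of the
 * _DRM_AGP_BUFFER flag; on return, count and size report what was
 * actually allocated.
 */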

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing the allocation of more buffers after this call.
 * Information about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
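/*
 * Worked examples: drm_order(4096) == 12 and drm_order(4097) == 13,
 * since 4096 is exactly 2^12 while 4097 needs the next power of two;
 * drm_order(1) == 0.
 */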

/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no
	 * security hole created by that and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
270 case _DRM_AGP: {
271 struct drm_agp_mem *entry;
272 int valid = 0;
273
274 if (!dev->agp) {
275 kfree(map);
276 return -EINVAL;
277 }
278#ifdef __alpha__
279 map->offset += dev->hose->mem_space->start;
280#endif
281 /* In some cases (i810 driver), user space may have already
282 * added the AGP base itself, because dev->agp->base previously
283 * only got set during AGP enable. So, only add the base
284 * address if the map's offset isn't already within the
285 * aperture.
286 */
287 if (map->offset < dev->agp->base ||
288 map->offset > dev->agp->base +
289 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
290 map->offset += dev->agp->base;
291 }
292 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
293
294 /* This assumes the DRM is in total control of AGP space.
295 * That's not always the case, as AGP can be under user-space
296 * control (e.g. the i810 driver); then the loop below finds
297 * nothing, so we only return -EPERM when dev->agp->memory is
298 * actually non-empty and the map falls outside every bound region.
299 */
300 list_for_each_entry(entry, &dev->agp->memory, head) {
301 if ((map->offset >= entry->bound) &&
302 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
303 valid = 1;
304 break;
305 }
306 }
307 if (!list_empty(&dev->agp->memory) && !valid) {
308 kfree(map);
309 return -EPERM;
310 }
311 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
312 (unsigned long long)map->offset, map->size);
313
314 break;
315 }
316 case _DRM_SCATTER_GATHER:
317 if (!dev->sg) {
318 kfree(map);
319 return -EINVAL;
320 }
321 map->offset += (unsigned long)dev->sg->virtual;
322 break;
323 case _DRM_CONSISTENT:
324 /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
325 * As we're limiting the address to 2^32-1 (or less),
326 * casting it down to 32 bits is no problem, but we
327 * need to point to a 64-bit variable first.
328 */
329 map->handle = dma_alloc_coherent(dev->dev,
330 map->size,
331 &map->offset,
332 GFP_KERNEL);
333 if (!map->handle) {
334 kfree(map);
335 return -ENOMEM;
336 }
337 break;
338 default:
339 kfree(map);
340 return -EINVAL;
341 }
342
343 list = kzalloc(sizeof(*list), GFP_KERNEL);
344 if (!list) {
345 if (map->type == _DRM_REGISTERS)
346 iounmap(map->handle);
347 kfree(map);
348 return -ENOMEM;
349 }
350 list->map = map;
351
352 mutex_lock(&dev->struct_mutex);
353 list_add(&list->head, &dev->maplist);
354
355 /* Assign a 32-bit handle */
356 /* We do it here so that dev->struct_mutex protects the increment */
357 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
358 map->offset;
359 ret = drm_map_handle(dev, &list->hash, user_token, 0,
360 (map->type == _DRM_SHM));
361 if (ret) {
362 if (map->type == _DRM_REGISTERS)
363 iounmap(map->handle);
364 kfree(map);
365 kfree(list);
366 mutex_unlock(&dev->struct_mutex);
367 return ret;
368 }
369
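/*
 * The hash key is in units of pages; scale it back up to a byte offset.
 * This token is what userspace later passes as the mmap(2) offset in
 * order to actually map the region.
 */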
370 list->user_token = list->hash.key << PAGE_SHIFT;
371 mutex_unlock(&dev->struct_mutex);
372
373 if (!(map->flags & _DRM_DRIVER))
374 list->master = dev->master;
375 *maplist = list;
376 return 0;
377}
378
379int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
380 unsigned int size, enum drm_map_type type,
381 enum drm_map_flags flags, struct drm_local_map **map_ptr)
382{
383 struct drm_map_list *list;
384 int rc;
385
386 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
387 if (!rc)
388 *map_ptr = list->map;
389 return rc;
390}
391EXPORT_SYMBOL(drm_legacy_addmap);
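
/*
 * Illustrative sketch only (not lifted from any in-tree driver): a legacy
 * PCI driver would typically map its register BAR at load time along these
 * lines, where "pdev" is the driver's struct pci_dev:
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, 0, &regs);
 *	if (ret)
 *		return ret;
 *
 * On success, regs->handle holds the ioremap()ed CPU address of the
 * register range.
 */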
392
393struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
394 unsigned int token)
395{
396 struct drm_map_list *_entry;
397
398 list_for_each_entry(_entry, &dev->maplist, head)
399 if (_entry->user_token == token)
400 return _entry->map;
401 return NULL;
402}
403EXPORT_SYMBOL(drm_legacy_findmap);
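
/*
 * Illustrative sketch only: given a user token previously handed out via
 * struct drm_map::handle, a driver can recover the kernel-side map with
 *
 *	struct drm_local_map *map = drm_legacy_findmap(dev, token);
 *	if (!map)
 *		return -EINVAL;
 *
 * Note that the list is walked without taking dev->struct_mutex, so the
 * caller must ensure the map list cannot change underneath it.
 */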
404
405/*
406 * Ioctl to specify a range of memory that is available for mapping by a
407 * non-root process.
408 *
409 * \param dev DRM device.
410 * \param data pointer to a struct drm_map structure.
411 * \param file_priv DRM file private.
413 * \return zero on success or a negative value on error.
414 *
415 */
416int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
417 struct drm_file *file_priv)
418{
419 struct drm_map *map = data;
420 struct drm_map_list *maplist;
421 int err;
422
423 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
424 return -EPERM;
425
426 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
427 !drm_core_check_feature(dev, DRIVER_LEGACY))
428 return -EOPNOTSUPP;
429
430 err = drm_addmap_core(dev, map->offset, map->size, map->type,
431 map->flags, &maplist);
432
433 if (err)
434 return err;
435
436 /* Avoid a warning on 64-bit. This cast isn't very nice, but the API is already set, so it's too late to change. */
437 map->handle = (void *)(unsigned long)maplist->user_token;
438
439 /*
440 * It appears that there are no users of this value whatsoever --
441 * drmAddMap just discards it. Let's not encourage its use.
442 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
443 * it's not a real mtrr index anymore.)
444 */
445 map->mtrr = -1;
446
447 return 0;
448}
449
450/*
451 * Get information about a mapping.
452 *
453 * \param dev DRM device.
454 * \param data pointer to a struct drm_map structure, used for both input
455 *	(the index in drm_map::offset) and output.
456 * \param file_priv DRM file private.
457 *
458 * \return zero on success or a negative number on failure.
459 *
460 * Searches for the mapping at the index passed in drm_map::offset and
461 * copies its information into userspace.
462 */
463int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
464 struct drm_file *file_priv)
465{
466 struct drm_map *map = data;
467 struct drm_map_list *r_list = NULL;
468 struct list_head *list;
469 int idx;
470 int i;
471
472 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
473 !drm_core_check_feature(dev, DRIVER_LEGACY))
474 return -EOPNOTSUPP;
475
476 idx = map->offset;
477 if (idx < 0)
478 return -EINVAL;
479
480 i = 0;
481 mutex_lock(&dev->struct_mutex);
482 list_for_each(list, &dev->maplist) {
483 if (i == idx) {
484 r_list = list_entry(list, struct drm_map_list, head);
485 break;
486 }
487 i++;
488 }
489 if (!r_list || !r_list->map) {
490 mutex_unlock(&dev->struct_mutex);
491 return -EINVAL;
492 }
493
494 map->offset = r_list->map->offset;
495 map->size = r_list->map->size;
496 map->type = r_list->map->type;
497 map->flags = r_list->map->flags;
498 map->handle = (void *)(unsigned long) r_list->user_token;
499 map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
500
501 mutex_unlock(&dev->struct_mutex);
502
503 return 0;
504}
505
506/*
507 * Remove a map private from the list and deallocate its resources.
508 *
509 * Searches for the map on drm_device::maplist, removes it from the list,
510 * and frees any associated resources (such as MTRRs and ioremap()ed or
511 * vmalloc()ed memory) according to the map type.
513 *
514 * \sa drm_legacy_addmap
515 */
516int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
517{
518 struct drm_map_list *r_list = NULL, *list_t;
519 int found = 0;
520 struct drm_master *master;
521
522 /* Find the list entry for the map and remove it */
523 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
524 if (r_list->map == map) {
525 master = r_list->master;
526 list_del(&r_list->head);
527 drm_ht_remove_key(&dev->map_hash,
528 r_list->user_token >> PAGE_SHIFT);
529 kfree(r_list);
530 found = 1;
531 break;
532 }
533 }
534
535 if (!found)
536 return -EINVAL;
537
538 switch (map->type) {
539 case _DRM_REGISTERS:
540 iounmap(map->handle);
541 fallthrough;
542 case _DRM_FRAME_BUFFER:
543 arch_phys_wc_del(map->mtrr);
544 break;
545 case _DRM_SHM:
546 vfree(map->handle);
547 if (master) {
548 if (dev->sigdata.lock == master->lock.hw_lock)
549 dev->sigdata.lock = NULL;
550 master->lock.hw_lock = NULL; /* SHM removed */
551 master->lock.file_priv = NULL;
552 wake_up_interruptible_all(&master->lock.lock_queue);
553 }
554 break;
555 case _DRM_AGP:
556 case _DRM_SCATTER_GATHER:
557 break;
558 case _DRM_CONSISTENT:
559 dma_free_coherent(dev->dev,
560 map->size,
561 map->handle,
562 map->offset);
563 break;
564 }
565 kfree(map);
566
567 return 0;
568}
569EXPORT_SYMBOL(drm_legacy_rmmap_locked);
570
571void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
572{
573 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
574 !drm_core_check_feature(dev, DRIVER_LEGACY))
575 return;
576
577 mutex_lock(&dev->struct_mutex);
578 drm_legacy_rmmap_locked(dev, map);
579 mutex_unlock(&dev->struct_mutex);
580}
581EXPORT_SYMBOL(drm_legacy_rmmap);
582
583void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
584{
585 struct drm_map_list *r_list, *list_temp;
586
587 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
588 return;
589
590 mutex_lock(&dev->struct_mutex);
591 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
592 if (r_list->master == master) {
593 drm_legacy_rmmap_locked(dev, r_list->map);
594 r_list = NULL;
595 }
596 }
597 mutex_unlock(&dev->struct_mutex);
598}
599
600void drm_legacy_rmmaps(struct drm_device *dev)
601{
602 struct drm_map_list *r_list, *list_temp;
603
604 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
605 drm_legacy_rmmap(dev, r_list->map);
606}
607
608/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
609 * the last close of the device, and this is necessary for cleanup when things
610 * exit uncleanly. Therefore, having userland manually remove mappings seems
611 * like a pointless exercise since they're going away anyway.
612 *
613 * One use case might arise if addmap were allowed for normal users for SHM
614 * maps that get used by drivers the server doesn't need to care about.
615 * This seems unlikely.
616 *
617 * \param dev DRM device.
618 * \param data pointer to a struct drm_map structure whose handle
619 *	identifies the map to remove.
620 * \param file_priv DRM file private.
621 * \return zero on success or a negative value on error.
622 */
623int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
624 struct drm_file *file_priv)
625{
626 struct drm_map *request = data;
627 struct drm_local_map *map = NULL;
628 struct drm_map_list *r_list;
629 int ret;
630
631 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
632 !drm_core_check_feature(dev, DRIVER_LEGACY))
633 return -EOPNOTSUPP;
634
635 mutex_lock(&dev->struct_mutex);
636 list_for_each_entry(r_list, &dev->maplist, head) {
637 if (r_list->map &&
638 r_list->user_token == (unsigned long)request->handle &&
639 r_list->map->flags & _DRM_REMOVABLE) {
640 map = r_list->map;
641 break;
642 }
643 }
644
645 /* List has wrapped around to the head pointer, or it's empty and we
646 * didn't find anything.
647 */
648 if (list_empty(&dev->maplist) || !map) {
649 mutex_unlock(&dev->struct_mutex);
650 return -EINVAL;
651 }
652
653 /* Register and framebuffer maps are permanent */
654 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
655 mutex_unlock(&dev->struct_mutex);
656 return 0;
657 }
658
659 ret = drm_legacy_rmmap_locked(dev, map);
660
661 mutex_unlock(&dev->struct_mutex);
662
663 return ret;
664}
665
666/*
667 * Cleanup after an error on one of the addbufs() functions.
668 *
669 * \param dev DRM device.
670 * \param entry buffer entry where the error occurred.
671 *
672 * Frees any pages and buffers associated with the given entry.
673 */
674static void drm_cleanup_buf_error(struct drm_device *dev,
675 struct drm_buf_entry *entry)
676{
677 drm_dma_handle_t *dmah;
678 int i;
679
680 if (entry->seg_count) {
681 for (i = 0; i < entry->seg_count; i++) {
682 if (entry->seglist[i]) {
683 dmah = entry->seglist[i];
684 dma_free_coherent(dev->dev,
685 dmah->size,
686 dmah->vaddr,
687 dmah->busaddr);
688 kfree(dmah);
689 }
690 }
691 kfree(entry->seglist);
692
693 entry->seg_count = 0;
694 }
695
696 if (entry->buf_count) {
697 for (i = 0; i < entry->buf_count; i++) {
698 kfree(entry->buflist[i].dev_private);
699 }
700 kfree(entry->buflist);
701
702 entry->buf_count = 0;
703 }
704}
705
706#if IS_ENABLED(CONFIG_AGP)
707/*
708 * Add AGP buffers for DMA transfers.
709 *
710 * \param dev struct drm_device to which the buffers are to be added.
711 * \param request pointer to a struct drm_buf_desc describing the request.
712 * \return zero on success or a negative number on failure.
713 *
714 * After some sanity checks, creates a drm_buf structure for each buffer
715 * and reallocates the device's buffer list to make room for the new
716 * buffers.
717 */
718int drm_legacy_addbufs_agp(struct drm_device *dev,
719 struct drm_buf_desc *request)
720{
721 struct drm_device_dma *dma = dev->dma;
722 struct drm_buf_entry *entry;
723 struct drm_agp_mem *agp_entry;
724 struct drm_buf *buf;
725 unsigned long offset;
726 unsigned long agp_offset;
727 int count;
728 int order;
729 int size;
730 int alignment;
731 int page_order;
732 int total;
733 int byte_count;
734 int i, valid;
735 struct drm_buf **temp_buflist;
736
737 if (!dma)
738 return -EINVAL;
739
740 count = request->count;
741 order = order_base_2(request->size);
742 size = 1 << order;
743
744 alignment = (request->flags & _DRM_PAGE_ALIGN)
745 ? PAGE_ALIGN(size) : size;
746 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
747 total = PAGE_SIZE << page_order;
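/*
 * Worked example, assuming 4 KiB pages: request->size == 16384 gives
 * order == 14 and size == 16384; with _DRM_PAGE_ALIGN set,
 * alignment == 16384, page_order == 14 - 12 == 2, and total ==
 * 4096 << 2 == 16384, i.e. each buffer spans four pages.
 */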
748
749 byte_count = 0;
750 agp_offset = dev->agp->base + request->agp_start;
751
752 DRM_DEBUG("count: %d\n", count);
753 DRM_DEBUG("order: %d\n", order);
754 DRM_DEBUG("size: %d\n", size);
755 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
756 DRM_DEBUG("alignment: %d\n", alignment);
757 DRM_DEBUG("page_order: %d\n", page_order);
758 DRM_DEBUG("total: %d\n", total);
759
760 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
761 return -EINVAL;
762
763 /* Make sure buffers are located in AGP memory that we own */
764 valid = 0;
765 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
766 if ((agp_offset >= agp_entry->bound) &&
767 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
768 valid = 1;
769 break;
770 }
771 }
772 if (!list_empty(&dev->agp->memory) && !valid) {
773 DRM_DEBUG("zone invalid\n");
774 return -EINVAL;
775 }
776 spin_lock(&dev->buf_lock);
777 if (dev->buf_use) {
778 spin_unlock(&dev->buf_lock);
779 return -EBUSY;
780 }
781 atomic_inc(&dev->buf_alloc);
782 spin_unlock(&dev->buf_lock);
783
784 mutex_lock(&dev->struct_mutex);
785 entry = &dma->bufs[order];
786 if (entry->buf_count) {
787 mutex_unlock(&dev->struct_mutex);
788 atomic_dec(&dev->buf_alloc);
789 return -ENOMEM; /* may only be called once per order */
790 }
791
792 if (count < 0 || count > 4096) {
793 mutex_unlock(&dev->struct_mutex);
794 atomic_dec(&dev->buf_alloc);
795 return -EINVAL;
796 }
797
798 entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
799 if (!entry->buflist) {
800 mutex_unlock(&dev->struct_mutex);
801 atomic_dec(&dev->buf_alloc);
802 return -ENOMEM;
803 }
804
805 entry->buf_size = size;
806 entry->page_order = page_order;
807
808 offset = 0;
809
810 while (entry->buf_count < count) {
811 buf = &entry->buflist[entry->buf_count];
812 buf->idx = dma->buf_count + entry->buf_count;
813 buf->total = alignment;
814 buf->order = order;
815 buf->used = 0;
816
817 buf->offset = (dma->byte_count + offset);
818 buf->bus_address = agp_offset + offset;
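/* Note: for AGP buffers this stores an aperture (bus) address,
 * not a dereferenceable kernel pointer.
 */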
819 buf->address = (void *)(agp_offset + offset);
820 buf->next = NULL;
821 buf->waiting = 0;
822 buf->pending = 0;
823 buf->file_priv = NULL;
824
825 buf->dev_priv_size = dev->driver->dev_priv_size;
826 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
827 if (!buf->dev_private) {
828 /* Set count correctly so we free the proper amount. */
829 entry->buf_count = count;
830 drm_cleanup_buf_error(dev, entry);
831 mutex_unlock(&dev->struct_mutex);
832 atomic_dec(&dev->buf_alloc);
833 return -ENOMEM;
834 }
835
836 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
837
838 offset += alignment;
839 entry->buf_count++;
840 byte_count += PAGE_SIZE << page_order;
841 }
842
843 DRM_DEBUG("byte_count: %d\n", byte_count);
844
845 temp_buflist = krealloc(dma->buflist,
846 (dma->buf_count + entry->buf_count) *
847 sizeof(*dma->buflist), GFP_KERNEL);
848 if (!temp_buflist) {
849 /* Free the entry because it isn't valid */
850 drm_cleanup_buf_error(dev, entry);
851 mutex_unlock(&dev->struct_mutex);
852 atomic_dec(&dev->buf_alloc);
853 return -ENOMEM;
854 }
855 dma->buflist = temp_buflist;
856
857 for (i = 0; i < entry->buf_count; i++) {
858 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
859 }
860
861 dma->buf_count += entry->buf_count;
862 dma->seg_count += entry->seg_count;
863 dma->page_count += byte_count >> PAGE_SHIFT;
864 dma->byte_count += byte_count;
865
866 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
867 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
868
869 mutex_unlock(&dev->struct_mutex);
870
871 request->count = entry->buf_count;
872 request->size = size;
873
874 dma->flags = _DRM_DMA_USE_AGP;
875
876 atomic_dec(&dev->buf_alloc);
877 return 0;
878}
879EXPORT_SYMBOL(drm_legacy_addbufs_agp);
880#endif /* CONFIG_AGP */
881
882int drm_legacy_addbufs_pci(struct drm_device *dev,
883 struct drm_buf_desc *request)
884{
885 struct drm_device_dma *dma = dev->dma;
886 int count;
887 int order;
888 int size;
889 int total;
890 int page_order;
891 struct drm_buf_entry *entry;
892 drm_dma_handle_t *dmah;
893 struct drm_buf *buf;
894 int alignment;
895 unsigned long offset;
896 int i;
897 int byte_count;
898 int page_count;
899 unsigned long *temp_pagelist;
900 struct drm_buf **temp_buflist;
901
902 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
903 return -EOPNOTSUPP;
904
905 if (!dma)
906 return -EINVAL;
907
908 if (!capable(CAP_SYS_ADMIN))
909 return -EPERM;
910
911 count = request->count;
912 order = order_base_2(request->size);
913 size = 1 << order;
914
915 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
916 request->count, request->size, size, order);
917
918 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
919 return -EINVAL;
920
921 alignment = (request->flags & _DRM_PAGE_ALIGN)
922 ? PAGE_ALIGN(size) : size;
923 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
924 total = PAGE_SIZE << page_order;
925
926 spin_lock(&dev->buf_lock);
927 if (dev->buf_use) {
928 spin_unlock(&dev->buf_lock);
929 return -EBUSY;
930 }
931 atomic_inc(&dev->buf_alloc);
932 spin_unlock(&dev->buf_lock);
933
934 mutex_lock(&dev->struct_mutex);
935 entry = &dma->bufs[order];
936 if (entry->buf_count) {
937 mutex_unlock(&dev->struct_mutex);
938 atomic_dec(&dev->buf_alloc);
939 return -ENOMEM; /* may only be called once per order */
940 }
941
942 if (count < 0 || count > 4096) {
943 mutex_unlock(&dev->struct_mutex);
944 atomic_dec(&dev->buf_alloc);
945 return -EINVAL;
946 }
947
948 entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
949 if (!entry->buflist) {
950 mutex_unlock(&dev->struct_mutex);
951 atomic_dec(&dev->buf_alloc);
952 return -ENOMEM;
953 }
954
955 entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
956 if (!entry->seglist) {
957 kfree(entry->buflist);
958 mutex_unlock(&dev->struct_mutex);
959 atomic_dec(&dev->buf_alloc);
960 return -ENOMEM;
961 }
962
963 /* Keep the original pagelist until we know all the allocations
964 * have succeeded
965 */
966 temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
967 sizeof(*dma->pagelist),
968 GFP_KERNEL);
969 if (!temp_pagelist) {
970 kfree(entry->buflist);
971 kfree(entry->seglist);
972 mutex_unlock(&dev->struct_mutex);
973 atomic_dec(&dev->buf_alloc);
974 return -ENOMEM;
975 }
976 memcpy(temp_pagelist,
977 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
978 DRM_DEBUG("pagelist: %d entries\n",
979 dma->page_count + (count << page_order));
980
981 entry->buf_size = size;
982 entry->page_order = page_order;
983 byte_count = 0;
984 page_count = 0;
985
986 while (entry->buf_count < count) {
987 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
988 if (!dmah) {
989 /* Set count correctly so we free the proper amount. */
990 entry->buf_count = count;
991 entry->seg_count = count;
992 drm_cleanup_buf_error(dev, entry);
993 kfree(temp_pagelist);
994 mutex_unlock(&dev->struct_mutex);
995 atomic_dec(&dev->buf_alloc);
996 return -ENOMEM;
997 }
998
999 dmah->size = total;
1000 dmah->vaddr = dma_alloc_coherent(dev->dev,
1001 dmah->size,
1002 &dmah->busaddr,
1003 GFP_KERNEL);
1004 if (!dmah->vaddr) {
1005 kfree(dmah);
1006
1007 /* Set count correctly so we free the proper amount. */
1008 entry->buf_count = count;
1009 entry->seg_count = count;
1010 drm_cleanup_buf_error(dev, entry);
1011 kfree(temp_pagelist);
1012 mutex_unlock(&dev->struct_mutex);
1013 atomic_dec(&dev->buf_alloc);
1014 return -ENOMEM;
1015 }
1016 entry->seglist[entry->seg_count++] = dmah;
1017 for (i = 0; i < (1 << page_order); i++) {
1018 DRM_DEBUG("page %d @ 0x%08lx\n",
1019 dma->page_count + page_count,
1020 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
1021 temp_pagelist[dma->page_count + page_count++]
1022 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
1023 }
1024 for (offset = 0;
1025 offset + size <= total && entry->buf_count < count;
1026 offset += alignment, ++entry->buf_count) {
1027 buf = &entry->buflist[entry->buf_count];
1028 buf->idx = dma->buf_count + entry->buf_count;
1029 buf->total = alignment;
1030 buf->order = order;
1031 buf->used = 0;
1032 buf->offset = (dma->byte_count + byte_count + offset);
1033 buf->address = (void *)(dmah->vaddr + offset);
1034 buf->bus_address = dmah->busaddr + offset;
1035 buf->next = NULL;
1036 buf->waiting = 0;
1037 buf->pending = 0;
1038 buf->file_priv = NULL;
1039
1040 buf->dev_priv_size = dev->driver->dev_priv_size;
1041 buf->dev_private = kzalloc(buf->dev_priv_size,
1042 GFP_KERNEL);
1043 if (!buf->dev_private) {
1044 /* Set count correctly so we free the proper amount. */
1045 entry->buf_count = count;
1046 entry->seg_count = count;
1047 drm_cleanup_buf_error(dev, entry);
1048 kfree(temp_pagelist);
1049 mutex_unlock(&dev->struct_mutex);
1050 atomic_dec(&dev->buf_alloc);
1051 return -ENOMEM;
1052 }
1053
1054 DRM_DEBUG("buffer %d @ %p\n",
1055 entry->buf_count, buf->address);
1056 }
1057 byte_count += PAGE_SIZE << page_order;
1058 }
1059
1060 temp_buflist = krealloc(dma->buflist,
1061 (dma->buf_count + entry->buf_count) *
1062 sizeof(*dma->buflist), GFP_KERNEL);
1063 if (!temp_buflist) {
1064 /* Free the entry because it isn't valid */
1065 drm_cleanup_buf_error(dev, entry);
1066 kfree(temp_pagelist);
1067 mutex_unlock(&dev->struct_mutex);
1068 atomic_dec(&dev->buf_alloc);
1069 return -ENOMEM;
1070 }
1071 dma->buflist = temp_buflist;
1072
1073 for (i = 0; i < entry->buf_count; i++) {
1074 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1075 }
1076
1077 /* No allocations failed, so now we can replace the original pagelist
1078 * with the new one.
1079 */
1080 if (dma->page_count) {
1081 kfree(dma->pagelist);
1082 }
1083 dma->pagelist = temp_pagelist;
1084
1085 dma->buf_count += entry->buf_count;
1086 dma->seg_count += entry->seg_count;
1087 dma->page_count += entry->seg_count << page_order;
1088 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1089
1090 mutex_unlock(&dev->struct_mutex);
1091
1092 request->count = entry->buf_count;
1093 request->size = size;
1094
1095 if (request->flags & _DRM_PCI_BUFFER_RO)
1096 dma->flags = _DRM_DMA_USE_PCI_RO;
1097
1098 atomic_dec(&dev->buf_alloc);
1099 return 0;
1100
1101}
1102EXPORT_SYMBOL(drm_legacy_addbufs_pci);
1103
1104static int drm_legacy_addbufs_sg(struct drm_device *dev,
1105 struct drm_buf_desc *request)
1106{
1107 struct drm_device_dma *dma = dev->dma;
1108 struct drm_buf_entry *entry;
1109 struct drm_buf *buf;
1110 unsigned long offset;
1111 unsigned long agp_offset;
1112 int count;
1113 int order;
1114 int size;
1115 int alignment;
1116 int page_order;
1117 int total;
1118 int byte_count;
1119 int i;
1120 struct drm_buf **temp_buflist;
1121
1122 if (!drm_core_check_feature(dev, DRIVER_SG))
1123 return -EOPNOTSUPP;
1124
1125 if (!dma)
1126 return -EINVAL;
1127
1128 if (!capable(CAP_SYS_ADMIN))
1129 return -EPERM;
1130
1131 count = request->count;
1132 order = order_base_2(request->size);
1133 size = 1 << order;
1134
1135 alignment = (request->flags & _DRM_PAGE_ALIGN)
1136 ? PAGE_ALIGN(size) : size;
1137 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1138 total = PAGE_SIZE << page_order;
1139
1140 byte_count = 0;
1141 agp_offset = request->agp_start;
1142
1143 DRM_DEBUG("count: %d\n", count);
1144 DRM_DEBUG("order: %d\n", order);
1145 DRM_DEBUG("size: %d\n", size);
1146 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1147 DRM_DEBUG("alignment: %d\n", alignment);
1148 DRM_DEBUG("page_order: %d\n", page_order);
1149 DRM_DEBUG("total: %d\n", total);
1150
1151 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1152 return -EINVAL;
1153
1154 spin_lock(&dev->buf_lock);
1155 if (dev->buf_use) {
1156 spin_unlock(&dev->buf_lock);
1157 return -EBUSY;
1158 }
1159 atomic_inc(&dev->buf_alloc);
1160 spin_unlock(&dev->buf_lock);
1161
1162 mutex_lock(&dev->struct_mutex);
1163 entry = &dma->bufs[order];
1164 if (entry->buf_count) {
1165 mutex_unlock(&dev->struct_mutex);
1166 atomic_dec(&dev->buf_alloc);
1167 return -ENOMEM; /* may only be called once per order */
1168 }
1169
1170 if (count < 0 || count > 4096) {
1171 mutex_unlock(&dev->struct_mutex);
1172 atomic_dec(&dev->buf_alloc);
1173 return -EINVAL;
1174 }
1175
1176 entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
1177 if (!entry->buflist) {
1178 mutex_unlock(&dev->struct_mutex);
1179 atomic_dec(&dev->buf_alloc);
1180 return -ENOMEM;
1181 }
1182
1183 entry->buf_size = size;
1184 entry->page_order = page_order;
1185
1186 offset = 0;
1187
1188 while (entry->buf_count < count) {
1189 buf = &entry->buflist[entry->buf_count];
1190 buf->idx = dma->buf_count + entry->buf_count;
1191 buf->total = alignment;
1192 buf->order = order;
1193 buf->used = 0;
1194
1195 buf->offset = (dma->byte_count + offset);
1196 buf->bus_address = agp_offset + offset;
1197 buf->address = (void *)(agp_offset + offset
1198 + (unsigned long)dev->sg->virtual);
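/* Here agp_offset is really just an offset into the
 * scatter/gather area, so unlike the AGP case this is a
 * real kernel virtual address.
 */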
1199 buf->next = NULL;
1200 buf->waiting = 0;
1201 buf->pending = 0;
1202 buf->file_priv = NULL;
1203
1204 buf->dev_priv_size = dev->driver->dev_priv_size;
1205 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1206 if (!buf->dev_private) {
1207 /* Set count correctly so we free the proper amount. */
1208 entry->buf_count = count;
1209 drm_cleanup_buf_error(dev, entry);
1210 mutex_unlock(&dev->struct_mutex);
1211 atomic_dec(&dev->buf_alloc);
1212 return -ENOMEM;
1213 }
1214
1215 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1216
1217 offset += alignment;
1218 entry->buf_count++;
1219 byte_count += PAGE_SIZE << page_order;
1220 }
1221
1222 DRM_DEBUG("byte_count: %d\n", byte_count);
1223
1224 temp_buflist = krealloc(dma->buflist,
1225 (dma->buf_count + entry->buf_count) *
1226 sizeof(*dma->buflist), GFP_KERNEL);
1227 if (!temp_buflist) {
1228 /* Free the entry because it isn't valid */
1229 drm_cleanup_buf_error(dev, entry);
1230 mutex_unlock(&dev->struct_mutex);
1231 atomic_dec(&dev->buf_alloc);
1232 return -ENOMEM;
1233 }
1234 dma->buflist = temp_buflist;
1235
1236 for (i = 0; i < entry->buf_count; i++) {
1237 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1238 }
1239
1240 dma->buf_count += entry->buf_count;
1241 dma->seg_count += entry->seg_count;
1242 dma->page_count += byte_count >> PAGE_SHIFT;
1243 dma->byte_count += byte_count;
1244
1245 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1246 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1247
1248 mutex_unlock(&dev->struct_mutex);
1249
1250 request->count = entry->buf_count;
1251 request->size = size;
1252
1253 dma->flags = _DRM_DMA_USE_SG;
1254
1255 atomic_dec(&dev->buf_alloc);
1256 return 0;
1257}
1258
1259/*
1260 * Add buffers for DMA transfers (ioctl).
1261 *
1262 * \param dev DRM device.
1263 * \param data pointer to a struct drm_buf_desc request.
1264 * \param file_priv DRM file private.
1266 * \return zero on success or a negative number on failure.
1267 *
1268 * Depending on the memory type specified in drm_buf_desc::flags and the
1269 * build options, dispatches the call either to drm_legacy_addbufs_agp(),
1270 * drm_legacy_addbufs_sg() or drm_legacy_addbufs_pci() for AGP,
1271 * scatter-gather or consistent PCI memory respectively.
1272 */
1273int drm_legacy_addbufs(struct drm_device *dev, void *data,
1274 struct drm_file *file_priv)
1275{
1276 struct drm_buf_desc *request = data;
1277 int ret;
1278
1279 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1280 return -EOPNOTSUPP;
1281
1282 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1283 return -EOPNOTSUPP;
1284
1285#if IS_ENABLED(CONFIG_AGP)
1286 if (request->flags & _DRM_AGP_BUFFER)
1287 ret = drm_legacy_addbufs_agp(dev, request);
1288 else
1289#endif
1290 if (request->flags & _DRM_SG_BUFFER)
1291 ret = drm_legacy_addbufs_sg(dev, request);
1292 else if (request->flags & _DRM_FB_BUFFER)
1293 ret = -EINVAL;
1294 else
1295 ret = drm_legacy_addbufs_pci(dev, request);
1296
1297 return ret;
1298}
1299
1300/*
1301 * Get information about the buffer mappings.
1302 *
1303 * This was originally meant for debugging purposes, or for use by a
1304 * sophisticated client library to determine how best to use the available
1305 * buffers (e.g., large buffers can be used for image transfer).
1306 *
1307 * \param dev DRM device.
1308 * \param data pointer to a struct drm_buf_info structure.
1309 * \param p pointer to the caller's entry count, updated on return.
1310 * \param f callback that copies one drm_buf_entry back to the caller.
1311 * \return zero on success or a negative number on failure.
1312 *
1313 * Increments drm_device::buf_use while holding the drm_device::buf_lock
1314 * lock, preventing any further buffer allocation after this call.
1315 * Information about each requested buffer is then copied into user space.
1316 */
1317int __drm_legacy_infobufs(struct drm_device *dev,
1318 void *data, int *p,
1319 int (*f)(void *, int, struct drm_buf_entry *))
1320{
1321 struct drm_device_dma *dma = dev->dma;
1322 int i;
1323 int count;
1324
1325 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1326 return -EOPNOTSUPP;
1327
1328 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1329 return -EOPNOTSUPP;
1330
1331 if (!dma)
1332 return -EINVAL;
1333
1334 spin_lock(&dev->buf_lock);
1335 if (atomic_read(&dev->buf_alloc)) {
1336 spin_unlock(&dev->buf_lock);
1337 return -EBUSY;
1338 }
1339 ++dev->buf_use; /* Can't allocate more after this call */
1340 spin_unlock(&dev->buf_lock);
1341
1342 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1343 if (dma->bufs[i].buf_count)
1344 ++count;
1345 }
1346
1347 DRM_DEBUG("count = %d\n", count);
1348
1349 if (*p >= count) {
1350 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1351 struct drm_buf_entry *from = &dma->bufs[i];
1352
1353 if (from->buf_count) {
1354 if (f(data, count, from) < 0)
1355 return -EFAULT;
1356 DRM_DEBUG("%d %d %d %d %d\n",
1357 i,
1358 dma->bufs[i].buf_count,
1359 dma->bufs[i].buf_size,
1360 dma->bufs[i].low_mark,
1361 dma->bufs[i].high_mark);
1362 ++count;
1363 }
1364 }
1365 }
1366 *p = count;
1367
1368 return 0;
1369}
1370
1371static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
1372{
1373 struct drm_buf_info *request = data;
1374 struct drm_buf_desc __user *to = &request->list[count];
1375 struct drm_buf_desc v = {.count = from->buf_count,
1376 .size = from->buf_size,
1377 .low_mark = from->low_mark,
1378 .high_mark = from->high_mark};
1379
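/*
 * Copy only the fields up to (but not including) ->flags; the remaining
 * fields of the user's struct drm_buf_desc are left untouched.
 */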
1380 if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
1381 return -EFAULT;
1382 return 0;
1383}
1384
1385int drm_legacy_infobufs(struct drm_device *dev, void *data,
1386 struct drm_file *file_priv)
1387{
1388 struct drm_buf_info *request = data;
1389
1390 return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
1391}
1392
1393/*
1394 * Specifies a low and high water mark for buffer allocation
1395 *
1396 * \param dev DRM device.
1397 * \param data pointer to a struct drm_buf_desc structure.
1398 * \param file_priv DRM file private.
1400 * \return zero on success or a negative number on failure.
1401 *
1402 * Verifies that the size order is within the admissible range and updates
1403 * the low and high water marks of the matching drm_device_dma::bufs entry.
1404 *
1405 * \note This ioctl is deprecated and mostly never used.
1406 */
1407int drm_legacy_markbufs(struct drm_device *dev, void *data,
1408 struct drm_file *file_priv)
1409{
1410 struct drm_device_dma *dma = dev->dma;
1411 struct drm_buf_desc *request = data;
1412 int order;
1413 struct drm_buf_entry *entry;
1414
1415 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1416 return -EOPNOTSUPP;
1417
1418 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1419 return -EOPNOTSUPP;
1420
1421 if (!dma)
1422 return -EINVAL;
1423
1424 DRM_DEBUG("%d, %d, %d\n",
1425 request->size, request->low_mark, request->high_mark);
1426 order = order_base_2(request->size);
1427 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1428 return -EINVAL;
1429 entry = &dma->bufs[order];
1430
1431 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1432 return -EINVAL;
1433 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1434 return -EINVAL;
1435
1436 entry->low_mark = request->low_mark;
1437 entry->high_mark = request->high_mark;
1438
1439 return 0;
1440}
1441
1442/*
1443 * Unreserves the buffers in the list previously reserved using drmDMA.
1444 *
1445 * \param dev DRM device.
1446 * \param data pointer to a struct drm_buf_free structure.
1447 * \param file_priv DRM file private.
1449 * \return zero on success or a negative number on failure.
1450 *
1451 * Calls drm_legacy_free_buffer() for each listed buffer.
1452 * This function is primarily used for debugging.
1453 */
1454int drm_legacy_freebufs(struct drm_device *dev, void *data,
1455 struct drm_file *file_priv)
1456{
1457 struct drm_device_dma *dma = dev->dma;
1458 struct drm_buf_free *request = data;
1459 int i;
1460 int idx;
1461 struct drm_buf *buf;
1462
1463 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1464 return -EOPNOTSUPP;
1465
1466 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1467 return -EOPNOTSUPP;
1468
1469 if (!dma)
1470 return -EINVAL;
1471
1472 DRM_DEBUG("%d\n", request->count);
1473 for (i = 0; i < request->count; i++) {
1474 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1475 return -EFAULT;
1476 if (idx < 0 || idx >= dma->buf_count) {
1477 DRM_ERROR("Index %d (of %d max)\n",
1478 idx, dma->buf_count - 1);
1479 return -EINVAL;
1480 }
1481 idx = array_index_nospec(idx, dma->buf_count);
1482 buf = dma->buflist[idx];
1483 if (buf->file_priv != file_priv) {
1484 DRM_ERROR("Process %d freeing buffer not owned\n",
1485 task_pid_nr(current));
1486 return -EINVAL;
1487 }
1488 drm_legacy_free_buffer(dev, buf);
1489 }
1490
1491 return 0;
1492}
1493
1494/*
1495 * Maps all of the DMA buffers into client-virtual space (ioctl).
1496 *
1497 * \param dev DRM device.
1498 * \param data pointer to a struct drm_buf_map structure.
1499 * \param file_priv DRM file private.
1501 * \return zero on success or a negative number on failure.
1502 *
1503 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1504 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1505 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1506 * drm_mmap_dma().
1507 */
1508int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
1509 void __user **v,
1510 int (*f)(void *, int, unsigned long,
1511 struct drm_buf *),
1512 struct drm_file *file_priv)
1513{
1514 struct drm_device_dma *dma = dev->dma;
1515 int retcode = 0;
1516 unsigned long virtual;
1517 int i;
1518
1519 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1520 return -EOPNOTSUPP;
1521
1522 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1523 return -EOPNOTSUPP;
1524
1525 if (!dma)
1526 return -EINVAL;
1527
1528 spin_lock(&dev->buf_lock);
1529 if (atomic_read(&dev->buf_alloc)) {
1530 spin_unlock(&dev->buf_lock);
1531 return -EBUSY;
1532 }
1533 dev->buf_use++; /* Can't allocate more after this call */
1534 spin_unlock(&dev->buf_lock);
1535
1536 if (*p >= dma->buf_count) {
1537 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
1538 || (drm_core_check_feature(dev, DRIVER_SG)
1539 && (dma->flags & _DRM_DMA_USE_SG))) {
1540 struct drm_local_map *map = dev->agp_buffer_map;
1541 unsigned long token = dev->agp_buffer_token;
1542
1543 if (!map) {
1544 retcode = -EINVAL;
1545 goto done;
1546 }
1547 virtual = vm_mmap(file_priv->filp, 0, map->size,
1548 PROT_READ | PROT_WRITE,
1549 MAP_SHARED,
1550 token);
1551 } else {
1552 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1553 PROT_READ | PROT_WRITE,
1554 MAP_SHARED, 0);
1555 }
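/*
 * vm_mmap() returns a userspace address on success and a negative
 * errno encoded in the unsigned return value on failure, so addresses
 * in the topmost 1024 bytes of the address space are treated as
 * errors here.
 */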
1556 if (virtual > -1024UL) {
1557 /* Real error */
1558 retcode = (signed long)virtual;
1559 goto done;
1560 }
1561 *v = (void __user *)virtual;
1562
1563 for (i = 0; i < dma->buf_count; i++) {
1564 if (f(data, i, virtual, dma->buflist[i]) < 0) {
1565 retcode = -EFAULT;
1566 goto done;
1567 }
1568 }
1569 }
1570 done:
1571 *p = dma->buf_count;
1572 DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
1573
1574 return retcode;
1575}
1576
1577static int map_one_buf(void *data, int idx, unsigned long virtual,
1578 struct drm_buf *buf)
1579{
1580 struct drm_buf_map *request = data;
1581 unsigned long address = virtual + buf->offset; /* user VA of this buffer */
1582
1583 if (copy_to_user(&request->list[idx].idx, &buf->idx,
1584 sizeof(request->list[0].idx)))
1585 return -EFAULT;
1586 if (copy_to_user(&request->list[idx].total, &buf->total,
1587 sizeof(request->list[0].total)))
1588 return -EFAULT;
1589 if (clear_user(&request->list[idx].used, sizeof(int)))
1590 return -EFAULT;
1591 if (copy_to_user(&request->list[idx].address, &address,
1592 sizeof(address)))
1593 return -EFAULT;
1594 return 0;
1595}
1596
1597int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1598 struct drm_file *file_priv)
1599{
1600 struct drm_buf_map *request = data;
1601
1602 return __drm_legacy_mapbufs(dev, data, &request->count,
1603 &request->virtual, map_one_buf,
1604 file_priv);
1605}
1606
1607int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1608 struct drm_file *file_priv)
1609{
1610 if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1611 return -EOPNOTSUPP;
1612
1613 if (dev->driver->dma_ioctl)
1614 return dev->driver->dma_ioctl(dev, data, file_priv);
1615 else
1616 return -EINVAL;
1617}
1618
1619struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1620{
1621 struct drm_map_list *entry;
1622
1623 list_for_each_entry(entry, &dev->maplist, head) {
1624 if (entry->map && entry->map->type == _DRM_SHM &&
1625 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1626 return entry->map;
1627 }
1628 }
1629 return NULL;
1630}
1631EXPORT_SYMBOL(drm_legacy_getsarea);
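
/*
 * Illustrative sketch only: legacy drivers typically look the SAREA up once
 * during initialization, e.g.
 *
 *	dev_priv->sarea = drm_legacy_getsarea(dev);
 *	if (!dev_priv->sarea)
 *		return -EINVAL;
 *
 * where "dev_priv" is a hypothetical driver-private structure.
 */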