1#ifndef __DRM_DRM_LEGACY_H__
2#define __DRM_DRM_LEGACY_H__
3
4/*
5 * Legacy driver interfaces for the Direct Rendering Manager
6 *
7 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
9 * Copyright (c) 2009-2010, Code Aurora Forum.
10 * All rights reserved.
11 * Copyright © 2014 Intel Corporation
12 * Daniel Vetter <daniel.vetter@ffwll.ch>
13 *
14 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
15 * Author: Gareth Hughes <gareth@valinux.com>
16 *
17 * Permission is hereby granted, free of charge, to any person obtaining a
18 * copy of this software and associated documentation files (the "Software"),
19 * to deal in the Software without restriction, including without limitation
20 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
21 * and/or sell copies of the Software, and to permit persons to whom the
22 * Software is furnished to do so, subject to the following conditions:
23 *
24 * The above copyright notice and this permission notice (including the next
25 * paragraph) shall be included in all copies or substantial portions of the
26 * Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
31 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
32 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
33 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
34 * OTHER DEALINGS IN THE SOFTWARE.
35 */
36
37
/*
 * Legacy Support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */
44
45/**
46 * DMA buffer.
47 */
48struct drm_buf {
49 int idx; /**< Index into master buflist */
50 int total; /**< Buffer size */
51 int order; /**< log-base-2(total) */
52 int used; /**< Amount of buffer in use (for DMA) */
53 unsigned long offset; /**< Byte offset (used internally) */
54 void *address; /**< Address of buffer */
55 unsigned long bus_address; /**< Bus address of buffer */
56 struct drm_buf *next; /**< Kernel-only: used for free list */
57 __volatile__ int waiting; /**< On kernel DMA queue */
58 __volatile__ int pending; /**< On hardware DMA queue */
59 struct drm_file *file_priv; /**< Private of holding file descr */
60 int context; /**< Kernel queue for this buffer */
61 int while_locked; /**< Dispatch this buffer while locked */
62 enum {
63 DRM_LIST_NONE = 0,
64 DRM_LIST_FREE = 1,
65 DRM_LIST_WAIT = 2,
66 DRM_LIST_PEND = 3,
67 DRM_LIST_PRIO = 4,
68 DRM_LIST_RECLAIM = 5
69 } list; /**< Which list we're on */
70
71 int dev_priv_size; /**< Size of buffer private storage */
72 void *dev_private; /**< Per-buffer private storage */
73};
74
75typedef struct drm_dma_handle {
76 dma_addr_t busaddr;
77 void *vaddr;
78 size_t size;
79} drm_dma_handle_t;
80
81/**
82 * Buffer entry. There is one of this for each buffer size order.
83 */
84struct drm_buf_entry {
85 int buf_size; /**< size */
86 int buf_count; /**< number of buffers */
87 struct drm_buf *buflist; /**< buffer list */
88 int seg_count;
89 int page_order;
90 struct drm_dma_handle **seglist;
91
92 int low_mark; /**< Low water mark */
93 int high_mark; /**< High water mark */
94};
95
96/**
97 * DMA data.
98 */
99struct drm_device_dma {
100
101 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
102 int buf_count; /**< total number of buffers */
103 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
104 int seg_count;
105 int page_count; /**< number of pages */
106 unsigned long *pagelist; /**< page list */
107 unsigned long byte_count;
108 enum {
109 _DRM_DMA_USE_AGP = 0x01,
110 _DRM_DMA_USE_SG = 0x02,
111 _DRM_DMA_USE_FB = 0x04,
112 _DRM_DMA_USE_PCI_RO = 0x08
113 } flags;
114
115};
116
117/**
118 * Scatter-gather memory.
119 */
120struct drm_sg_mem {
121 unsigned long handle;
122 void *virtual;
123 int pages;
124 struct page **pagelist;
125 dma_addr_t *busaddr;
126};
127
128/**
129 * Kernel side of a mapping
130 */
131struct drm_local_map {
132 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
133 unsigned long size; /**< Requested physical size (bytes) */
134 enum drm_map_type type; /**< Type of memory to map */
135 enum drm_map_flags flags; /**< Flags */
136 void *handle; /**< User-space: "Handle" to pass to mmap() */
137 /**< Kernel-space: kernel-virtual address */
138 int mtrr; /**< MTRR slot used */
139};
140
141typedef struct drm_local_map drm_local_map_t;
142
143/**
144 * Mappings list
145 */
146struct drm_map_list {
147 struct list_head head; /**< list head */
148 struct drm_hash_item hash;
149 struct drm_local_map *map; /**< mapping */
150 uint64_t user_token;
151 struct drm_master *master;
152};
153
154int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
155 unsigned int size, enum drm_map_type type,
156 enum drm_map_flags flags, struct drm_local_map **map_p);
157int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
158int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
159struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
160int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
161
162int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
163int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
164
165/**
166 * Test that the hardware lock is held by the caller, returning otherwise.
167 *
168 * \param dev DRM device.
169 * \param filp file pointer of the caller.
170 */
171#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
172do { \
173 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
174 _file_priv->master->lock.file_priv != _file_priv) { \
175 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
176 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
177 _file_priv->master->lock.file_priv, _file_priv); \
178 return -EINVAL; \
179 } \
180} while (0)
181
182void drm_legacy_idlelock_take(struct drm_lock_data *lock);
183void drm_legacy_idlelock_release(struct drm_lock_data *lock);
184
185/* drm_pci.c dma alloc wrappers */
186void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
187
188/* drm_memory.c */
189void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
190void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
191void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
192
193static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
194 unsigned int token)
195{
196 struct drm_map_list *_entry;
197 list_for_each_entry(_entry, &dev->maplist, head)
198 if (_entry->user_token == token)
199 return _entry->map;
200 return NULL;
201}
202
203#endif /* __DRM_DRM_LEGACY_H__ */
1#ifndef __DRM_DRM_LEGACY_H__
2#define __DRM_DRM_LEGACY_H__
3/*
4 * Legacy driver interfaces for the Direct Rendering Manager
5 *
6 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
7 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Copyright (c) 2009-2010, Code Aurora Forum.
9 * All rights reserved.
10 * Copyright © 2014 Intel Corporation
11 * Daniel Vetter <daniel.vetter@ffwll.ch>
12 *
13 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
14 * Author: Gareth Hughes <gareth@valinux.com>
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <drm/drm.h>
37#include <drm/drm_auth.h>
38#include <drm/drm_hashtab.h>
39
40struct drm_device;
41struct file;
42
/*
 * Legacy Support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */
49
50/**
51 * DMA buffer.
52 */
53struct drm_buf {
54 int idx; /**< Index into master buflist */
55 int total; /**< Buffer size */
56 int order; /**< log-base-2(total) */
57 int used; /**< Amount of buffer in use (for DMA) */
58 unsigned long offset; /**< Byte offset (used internally) */
59 void *address; /**< Address of buffer */
60 unsigned long bus_address; /**< Bus address of buffer */
61 struct drm_buf *next; /**< Kernel-only: used for free list */
62 __volatile__ int waiting; /**< On kernel DMA queue */
63 __volatile__ int pending; /**< On hardware DMA queue */
64 struct drm_file *file_priv; /**< Private of holding file descr */
65 int context; /**< Kernel queue for this buffer */
66 int while_locked; /**< Dispatch this buffer while locked */
67 enum {
68 DRM_LIST_NONE = 0,
69 DRM_LIST_FREE = 1,
70 DRM_LIST_WAIT = 2,
71 DRM_LIST_PEND = 3,
72 DRM_LIST_PRIO = 4,
73 DRM_LIST_RECLAIM = 5
74 } list; /**< Which list we're on */
75
76 int dev_priv_size; /**< Size of buffer private storage */
77 void *dev_private; /**< Per-buffer private storage */
78};
79
80typedef struct drm_dma_handle {
81 dma_addr_t busaddr;
82 void *vaddr;
83 size_t size;
84} drm_dma_handle_t;
85
86/**
87 * Buffer entry. There is one of this for each buffer size order.
88 */
89struct drm_buf_entry {
90 int buf_size; /**< size */
91 int buf_count; /**< number of buffers */
92 struct drm_buf *buflist; /**< buffer list */
93 int seg_count;
94 int page_order;
95 struct drm_dma_handle **seglist;
96
97 int low_mark; /**< Low water mark */
98 int high_mark; /**< High water mark */
99};
100
101/**
102 * DMA data.
103 */
104struct drm_device_dma {
105
106 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
107 int buf_count; /**< total number of buffers */
108 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
109 int seg_count;
110 int page_count; /**< number of pages */
111 unsigned long *pagelist; /**< page list */
112 unsigned long byte_count;
113 enum {
114 _DRM_DMA_USE_AGP = 0x01,
115 _DRM_DMA_USE_SG = 0x02,
116 _DRM_DMA_USE_FB = 0x04,
117 _DRM_DMA_USE_PCI_RO = 0x08
118 } flags;
119
120};
121
122/**
123 * Scatter-gather memory.
124 */
125struct drm_sg_mem {
126 unsigned long handle;
127 void *virtual;
128 int pages;
129 struct page **pagelist;
130 dma_addr_t *busaddr;
131};
132
133/**
134 * Kernel side of a mapping
135 */
136struct drm_local_map {
137 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
138 unsigned long size; /**< Requested physical size (bytes) */
139 enum drm_map_type type; /**< Type of memory to map */
140 enum drm_map_flags flags; /**< Flags */
141 void *handle; /**< User-space: "Handle" to pass to mmap() */
142 /**< Kernel-space: kernel-virtual address */
143 int mtrr; /**< MTRR slot used */
144};
145
146typedef struct drm_local_map drm_local_map_t;
147
148/**
149 * Mappings list
150 */
151struct drm_map_list {
152 struct list_head head; /**< list head */
153 struct drm_hash_item hash;
154 struct drm_local_map *map; /**< mapping */
155 uint64_t user_token;
156 struct drm_master *master;
157};
158
159int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
160 unsigned int size, enum drm_map_type type,
161 enum drm_map_flags flags, struct drm_local_map **map_p);
162struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
163void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
164int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
165struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
166int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
167
168int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
169int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
170
171/**
172 * Test that the hardware lock is held by the caller, returning otherwise.
173 *
174 * \param dev DRM device.
175 * \param filp file pointer of the caller.
176 */
177#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
178do { \
179 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
180 _file_priv->master->lock.file_priv != _file_priv) { \
181 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
182 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
183 _file_priv->master->lock.file_priv, _file_priv); \
184 return -EINVAL; \
185 } \
186} while (0)
187
188void drm_legacy_idlelock_take(struct drm_lock_data *lock);
189void drm_legacy_idlelock_release(struct drm_lock_data *lock);
190
191/* drm_pci.c dma alloc wrappers */
192void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
193
194/* drm_memory.c */
195void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
196void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
197void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
198
199#endif /* __DRM_DRM_LEGACY_H__ */