/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <drm/drm_crtc_helper.h>
#include <linux/io-mapping.h>

int qxl_log_level;

static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
	struct qxl_mode *m = p;
	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
		      m->y_mili, m->orientation);
}

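/*
 * Sanity-check the ROM BAR exposed by the emulated device: verify the ROM
 * magic ("QXRO", little-endian), log the device/driver revision info, and
 * pick up the native mode list that follows the ROM header.
 */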
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}

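/*
 * Program a memory slot's physical range into the RAM header and register
 * it with the device (qxl_io_memslot_add()).  A slot describes a guest
 * physical range (VRAM or surface RAM) that the device may address.
 */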
static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}

static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
			  unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;

	setup_hw_slot(qdev, slot_index, slot);

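	/*
	 * Addresses handed to the device carry the slot id and slot
	 * generation in their top (slot_id_bits + slot_gen_bits) bits;
	 * the remaining low bits are an offset into the slot.  Precompute
	 * that high part here so qxl_bo_physical_address() can simply OR
	 * it with a bo offset.
	 */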
	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}

void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
}

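/*
 * Deferred "garbage collection": drain the release ring and free the
 * driver resources the device has finished with.  Queued from the IRQ
 * handler (via qxl_queue_garbage_collect()) so the actual work runs in
 * process context.
 */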
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}

static int qxl_device_init(struct qxl_device *qdev,
			   struct drm_device *ddev,
			   struct pci_dev *pdev,
			   unsigned long flags)
{
	int r, sb;

	qdev->dev = &pdev->dev;
	qdev->ddev = ddev;
	qdev->pdev = pdev;
	qdev->flags = flags;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	INIT_LIST_HEAD(&qdev->gem.objects);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	if (!qxl_check_device(qdev))
		return -ENODEV;

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
		&(qdev->ram_header->cursor_ring_hdr),
		sizeof(struct qxl_command),
		QXL_CURSOR_RING_SIZE,
		qdev->io_base + QXL_IO_NOTIFY_CURSOR,
		false,
		&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
		&(qdev->ram_header->release_ring_hdr),
		sizeof(uint64_t),
		QXL_RELEASE_RING_SIZE, 0, true,
		NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x]\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
		 qdev->surfaces_mem_slot,
		 (unsigned long)qdev->surfaceram_base,
		 (unsigned long)qdev->surfaceram_size);

	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	r = qxl_fb_init(qdev);
	if (r)
		return r;

	return 0;
}

static void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}

int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qdev = dev->dev_private;

	if (qdev == NULL)
		return 0;

	drm_vblank_cleanup(dev);

	qxl_modeset_fini(qdev);
	qxl_device_fini(qdev);

	kfree(qdev);
	dev->dev_private = NULL;
	return 0;
}

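/*
 * Main DRM load entry point: allocate the per-device qxl_device, bring up
 * the hardware (qxl_device_init), then vblank handling and the KMS side.
 * On failure the partially initialized device is torn down again.
 */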
int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct qxl_device *qdev;
	int r;

	/* require kms */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
	if (qdev == NULL)
		return -ENOMEM;

	dev->dev_private = qdev;

	r = qxl_device_init(qdev, dev, dev->pdev, flags);
	if (r)
		goto out;

	r = drm_vblank_init(dev, 1);
	if (r)
		goto unload;

	r = qxl_modeset_init(qdev);
	if (r)
		goto unload;

	drm_kms_helper_poll_init(qdev->ddev);

	return 0;
unload:
	/* qxl_driver_unload() already frees qdev; don't fall through to
	 * the kfree() below or we would free it twice. */
	qxl_driver_unload(dev);
	return r;

out:
	kfree(qdev);
	return r;
}

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/io-mapping.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "qxl_drv.h"
#include "qxl_object.h"

int qxl_log_level;

static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
	return true;
}

static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
}

static void setup_slot(struct qxl_device *qdev,
		       struct qxl_memslot *slot,
		       unsigned int slot_index,
		       const char *slot_name,
		       unsigned long start_phys_addr,
		       unsigned long size)
{
	uint64_t high_bits;

	slot->index = slot_index;
	slot->name = slot_name;
	slot->start_phys_addr = start_phys_addr;
	slot->size = size;

	setup_hw_slot(qdev, slot);

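	/*
	 * As in qxl_bo_physical_address(), device-visible addresses are
	 * slot->high_bits OR'ed with an offset into the slot, so pack the
	 * (absolute) slot id and current generation into the top bits here.
	 */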
	slot->generation = qdev->rom->slot_generation;
	high_bits = (qdev->rom->slots_start + slot->index)
		<< qdev->rom->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
	slot->high_bits = high_bits;

	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx\n",
		 slot->index, slot->name,
		 (unsigned long)slot->start_phys_addr,
		 (unsigned long)slot->size);
}

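/*
 * Re-register both memory slots with the device after it has been reset,
 * e.g. on resume; the slot descriptions cached in qdev are still valid,
 * the device side just has to be told about them again.
 */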
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, &qdev->main_slot);
	setup_hw_slot(qdev, &qdev->surfaces_slot);
}

static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);

	qxl_garbage_collect(qdev);
}

int qxl_device_init(struct qxl_device *qdev,
		    struct pci_dev *pdev)
{
	int r, sb;

	pci_set_drvdata(pdev, &qdev->ddev);

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	if (!qdev->vram_mapping) {
		pr_err("Unable to create vram_mapping");
		return -ENOMEM;
	}

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
		if (!qdev->surface_mapping) {
			pr_err("Unable to create surface_mapping");
			r = -ENOMEM;
			goto vram_mapping_free;
		}
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		r = -ENOMEM;
		goto surface_mapping_free;
	}

	if (!qxl_check_device(qdev)) {
		r = -ENODEV;
		goto rom_unmap;
	}

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		goto rom_unmap;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
	if (!qdev->ram_header) {
		DRM_ERROR("Unable to ioremap RAM header\n");
		r = -ENOMEM;
		goto bo_fini;
	}

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);
	if (!qdev->command_ring) {
		DRM_ERROR("Unable to create command ring\n");
		r = -ENOMEM;
		goto ram_header_unmap;
	}

	qdev->cursor_ring = qxl_ring_create(
		&(qdev->ram_header->cursor_ring_hdr),
		sizeof(struct qxl_command),
		QXL_CURSOR_RING_SIZE,
		qdev->io_base + QXL_IO_NOTIFY_CURSOR,
		false,
		&qdev->cursor_event);
	if (!qdev->cursor_ring) {
		DRM_ERROR("Unable to create cursor ring\n");
		r = -ENOMEM;
		goto command_ring_free;
	}

	qdev->release_ring = qxl_ring_create(
		&(qdev->ram_header->release_ring_hdr),
		sizeof(uint64_t),
		QXL_RELEASE_RING_SIZE, 0, true,
		NULL);
	if (!qdev->release_ring) {
		DRM_ERROR("Unable to create release ring\n");
		r = -ENOMEM;
		goto cursor_ring_free;
	}

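	/* Both IDRs only ever hand out IDs >= 1, so a base of 1 spares the
	 * IDR from tracking the unused ID 0. */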
	idr_init_base(&qdev->release_idr, 1);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init_base(&qdev->surf_id_idr, 1);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r) {
		DRM_ERROR("Unable to init qxl irq\n");
		goto release_ring_free;
	}

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	setup_slot(qdev, &qdev->main_slot, 0, "main",
		   (unsigned long)qdev->vram_base,
		   (unsigned long)qdev->rom->ram_header_offset);
	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
		   (unsigned long)qdev->surfaceram_base,
		   (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;

release_ring_free:
	qxl_ring_free(qdev->release_ring);
cursor_ring_free:
	qxl_ring_free(qdev->cursor_ring);
command_ring_free:
	qxl_ring_free(qdev->command_ring);
ram_header_unmap:
	iounmap(qdev->ram_header);
bo_fini:
	qxl_bo_fini(qdev);
rom_unmap:
	iounmap(qdev->rom);
surface_mapping_free:
	io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
	io_mapping_free(qdev->vram_mapping);
	return r;
}

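/*
 * Tear down in roughly the reverse order of qxl_device_init(): drop the
 * pending release BOs, let the host flush outstanding releases, evict
 * surfaces and VRAM, then free the rings and unmap the BARs.
 */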
void qxl_device_fini(struct qxl_device *qdev)
{
	int cur_idx;

	/* check if qxl_device_init() was successful (gc_work is initialized last) */
	if (!qdev->gc_work.func)
		return;

	for (cur_idx = 0; cur_idx < 3; cur_idx++) {
		if (!qdev->current_release_bo[cur_idx])
			continue;
		qxl_bo_unpin(qdev->current_release_bo[cur_idx]);
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}

	/*
	 * Ask host to release resources (+fill release ring),
	 * then wait for the release actually happening.
	 */
	qxl_io_notify_oom(qdev);
	wait_event_timeout(qdev->release_event,
			   atomic_read(&qdev->release_count) == 0,
			   HZ);
	flush_work(&qdev->gc_work);
	qxl_surf_evict(qdev);
	qxl_vram_evict(qdev);

	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
}