Linux Audio

Check our new training course

Linux debugging, profiling, tracing and performance analysis training

Apr 14-17, 2025
Register
Loading...
v5.9
  1/*
  2 * Copyright 2013 Red Hat Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Dave Airlie
 23 *          Alon Levy
 24 */
 25
 26#include <linux/io-mapping.h>
 27#include <linux/pci.h>
 28
 29#include <drm/drm_drv.h>
 30#include <drm/drm_managed.h>
 31#include <drm/drm_probe_helper.h>
 32
 33#include "qxl_drv.h"
 34#include "qxl_object.h"
 35
/* Driver-wide log verbosity knob; presumably read by the QXL_INFO/debug
 * macros in qxl_drv.h — TODO confirm against the header. Defaults to 0. */
  36int qxl_log_level;
  37
/*
 * Validate the device ROM header already mapped at qdev->rom: verify the
 * magic signature, log the ROM-advertised parameters, and record the
 * surface0 area size as qdev->vram_size.
 *
 * Returns true if the ROM looks valid, false on a bad signature.
 */
 38static bool qxl_check_device(struct qxl_device *qdev)
 39{
 40	struct qxl_rom *rom = qdev->rom;
 41
	/* 0x4f525851 is little-endian ASCII "QXRO" (the qxl ROM magic). */
 42	if (rom->magic != 0x4f525851) {
 43		DRM_ERROR("bad rom signature %x\n", rom->magic);
 44		return false;
 45	}
 46
 47	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
 48	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
 49		 rom->log_level);
 50	DRM_INFO("%d io pages at offset 0x%x\n",
 51		 rom->num_io_pages, rom->pages_offset);
 52	DRM_INFO("%d byte draw area at offset 0x%x\n",
 53		 rom->surface0_area_size, rom->draw_area_offset);
 54
	/* surface0 (the primary surface area) is what we expose as VRAM size. */
 55	qdev->vram_size = rom->surface0_area_size;
 56	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
 57	return true;
 58}
 59
/*
 * Program one memory slot into the device: publish the slot's physical
 * address range through the shared RAM header, then register it with the
 * device via qxl_io_memslot_add().  The header writes must precede the
 * io call, since the device reads mem_start/mem_end when the slot is added.
 */
 60static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
 61{
 62	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
 63	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
 64	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
 65}
 66
/*
 * Initialize the driver-side bookkeeping for a memory slot, register it
 * with the hardware, and precompute the slot's "high bits" — the slot id
 * and generation packed into the top bits of a 64-bit value, used when
 * the driver encodes guest physical addresses for the device.
 */
 67static void setup_slot(struct qxl_device *qdev,
 68		       struct qxl_memslot *slot,
 69		       unsigned int slot_index,
 70		       const char *slot_name,
 71		       unsigned long start_phys_addr,
 72		       unsigned long size)
 73{
 74	uint64_t high_bits;
 75
 76	slot->index = slot_index;
 77	slot->name = slot_name;
 78	slot->start_phys_addr = start_phys_addr;
 79	slot->size = size;
 80
	/* Registering the slot also latches the current slot_generation in
	 * the ROM, which we read back below. */
 81	setup_hw_slot(qdev, slot);
 82
 83	slot->generation = qdev->rom->slot_generation;
	/* Pack (slot id | generation) and shift the pair to the very top of
	 * the 64-bit word, leaving the low bits for the in-slot offset. */
 84	high_bits = (qdev->rom->slots_start + slot->index)
 85		<< qdev->rom->slot_gen_bits;
 86	high_bits |= slot->generation;
 87	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
 88	slot->high_bits = high_bits;
 89
 90	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx\n",
 91		 slot->index, slot->name,
 92		 (unsigned long)slot->start_phys_addr,
 93		 (unsigned long)slot->size);
 94}
 95
/*
 * Re-program both memory slots into the hardware using the parameters
 * already cached in qdev.  NOTE(review): presumably called after a device
 * reset (e.g. resume) when the device has forgotten its slots — confirm
 * against the callers.
 */
 96void qxl_reinit_memslots(struct qxl_device *qdev)
 97{
 98	setup_hw_slot(qdev, &qdev->main_slot);
 99	setup_hw_slot(qdev, &qdev->surfaces_slot);
100}
101
/* Workqueue callback: recover the owning qxl_device from the embedded
 * work_struct and run a garbage-collection pass over finished releases. */
102static void qxl_gc_work(struct work_struct *work)
103{
104	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
105
106	qxl_garbage_collect(qdev);
107}
108
/*
 * qxl_device_init() - bring up one qxl PCI device.
 *
 * Maps the VRAM, surface and ROM BARs, validates the ROM, initializes the
 * BO layer, creates the command/cursor/release rings inside the shared RAM
 * header, resets the device, wires up the IRQ and registers the two memory
 * slots.  Statement order matters throughout: each step depends on the
 * previous mapping/reset, and failures unwind in reverse via the goto
 * chain at the bottom.
 *
 * Returns 0 on success or a negative errno on failure.
 */
109int qxl_device_init(struct qxl_device *qdev,
110		    struct pci_dev *pdev)
111{
112	int r, sb;
113
114	qdev->ddev.pdev = pdev;
115	pci_set_drvdata(pdev, &qdev->ddev);
116
117	mutex_init(&qdev->gem.mutex);
118	mutex_init(&qdev->update_area_mutex);
119	mutex_init(&qdev->release_mutex);
120	mutex_init(&qdev->surf_evict_mutex);
121	qxl_gem_init(qdev);
122
	/* BAR layout used here: 0 = VRAM, 2 = ROM, 3 = I/O ports,
	 * 4 (64-bit) or 1 (32-bit) = surface RAM. */
123	qdev->rom_base = pci_resource_start(pdev, 2);
124	qdev->rom_size = pci_resource_len(pdev, 2);
125	qdev->vram_base = pci_resource_start(pdev, 0);
126	qdev->io_base = pci_resource_start(pdev, 3);
127
128	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
129	if (!qdev->vram_mapping) {
130		pr_err("Unable to create vram_mapping");
131		return -ENOMEM;
132	}
133
134	if (pci_resource_len(pdev, 4) > 0) {
135		/* 64bit surface bar present */
136		sb = 4;
137		qdev->surfaceram_base = pci_resource_start(pdev, sb);
138		qdev->surfaceram_size = pci_resource_len(pdev, sb);
139		qdev->surface_mapping =
140			io_mapping_create_wc(qdev->surfaceram_base,
141					     qdev->surfaceram_size);
142	}
143	if (qdev->surface_mapping == NULL) {
144		/* 64bit surface bar not present (or mapping failed) */
145		sb = 1;
146		qdev->surfaceram_base = pci_resource_start(pdev, sb);
147		qdev->surfaceram_size = pci_resource_len(pdev, sb);
148		qdev->surface_mapping =
149			io_mapping_create_wc(qdev->surfaceram_base,
150					     qdev->surfaceram_size);
151		if (!qdev->surface_mapping) {
152			pr_err("Unable to create surface_mapping");
153			r = -ENOMEM;
154			goto vram_mapping_free;
155		}
156	}
157
158	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
159		 (unsigned long long)qdev->vram_base,
160		 (unsigned long long)pci_resource_end(pdev, 0),
161		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
162		 (int)pci_resource_len(pdev, 0) / 1024,
163		 (unsigned long long)qdev->surfaceram_base,
164		 (unsigned long long)pci_resource_end(pdev, sb),
165		 (int)qdev->surfaceram_size / 1024 / 1024,
166		 (int)qdev->surfaceram_size / 1024,
167		 (sb == 4) ? "64bit" : "32bit");
168
169	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
170	if (!qdev->rom) {
171		pr_err("Unable to ioremap ROM\n");
172		r = -ENOMEM;
173		goto surface_mapping_free;
174	}
175
	/* ROM must be mapped and sane before anything below touches
	 * rom->ram_header_offset or the rings. */
176	if (!qxl_check_device(qdev)) {
177		r = -ENODEV;
178		goto rom_unmap;
179	}
180
181	r = qxl_bo_init(qdev);
182	if (r) {
183		DRM_ERROR("bo init failed %d\n", r);
184		goto rom_unmap;
185	}
186
	/* The RAM header lives inside the VRAM BAR at the ROM-advertised
	 * offset; it holds the ring headers used below. */
187	qdev->ram_header = ioremap(qdev->vram_base +
188				   qdev->rom->ram_header_offset,
189				   sizeof(*qdev->ram_header));
190	if (!qdev->ram_header) {
191		DRM_ERROR("Unable to ioremap RAM header\n");
192		r = -ENOMEM;
193		goto bo_fini;
194	}
195
196	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
197					     sizeof(struct qxl_command),
198					     QXL_COMMAND_RING_SIZE,
199					     qdev->io_base + QXL_IO_NOTIFY_CMD,
200					     false,
201					     &qdev->display_event);
202	if (!qdev->command_ring) {
203		DRM_ERROR("Unable to create command ring\n");
204		r = -ENOMEM;
205		goto ram_header_unmap;
206	}
207
208	qdev->cursor_ring = qxl_ring_create(
209				&(qdev->ram_header->cursor_ring_hdr),
210				sizeof(struct qxl_command),
211				QXL_CURSOR_RING_SIZE,
212				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
213				false,
214				&qdev->cursor_event);
215
216	if (!qdev->cursor_ring) {
217		DRM_ERROR("Unable to create cursor ring\n");
218		r = -ENOMEM;
219		goto command_ring_free;
220	}
221
	/* Release ring has no notify port (0) and is marked set_prod_notify. */
222	qdev->release_ring = qxl_ring_create(
223				&(qdev->ram_header->release_ring_hdr),
224				sizeof(uint64_t),
225				QXL_RELEASE_RING_SIZE, 0, true,
226				NULL);
227
228	if (!qdev->release_ring) {
229		DRM_ERROR("Unable to create release ring\n");
230		r = -ENOMEM;
231		goto cursor_ring_free;
232	}
233
234	idr_init(&qdev->release_idr);
235	spin_lock_init(&qdev->release_idr_lock);
236	spin_lock_init(&qdev->release_lock);
237
238	idr_init(&qdev->surf_id_idr);
239	spin_lock_init(&qdev->surf_id_idr_lock);
240
241	mutex_init(&qdev->async_io_mutex);
242
243	/* reset the device into a known state - no memslots, no primary
244	 * created, no surfaces. */
245	qxl_io_reset(qdev);
246
247	/* must initialize irq before first async io - slot creation */
248	r = qxl_irq_init(qdev);
249	if (r) {
250		DRM_ERROR("Unable to init qxl irq\n");
251		goto release_ring_free;
252	}
253
254	/*
255	 * Note that virtual is surface0. We rely on the single ioremap done
256	 * before.
257	 */
258	setup_slot(qdev, &qdev->main_slot, 0, "main",
259		   (unsigned long)qdev->vram_base,
260		   (unsigned long)qdev->rom->ram_header_offset);
261	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
262		   (unsigned long)qdev->surfaceram_base,
263		   (unsigned long)qdev->surfaceram_size);
264
265	INIT_WORK(&qdev->gc_work, qxl_gc_work);
266
267	return 0;
268
	/* Error unwinding: strictly the reverse of the acquisition order. */
269release_ring_free:
270	qxl_ring_free(qdev->release_ring);
271cursor_ring_free:
272	qxl_ring_free(qdev->cursor_ring);
273command_ring_free:
274	qxl_ring_free(qdev->command_ring);
275ram_header_unmap:
276	iounmap(qdev->ram_header);
277bo_fini:
278	qxl_bo_fini(qdev);
279rom_unmap:
280	iounmap(qdev->rom);
281surface_mapping_free:
282	io_mapping_free(qdev->surface_mapping);
283vram_mapping_free:
284	io_mapping_free(qdev->vram_mapping);
285	return r;
286}
287
/*
 * qxl_device_fini() - tear down a qxl device, releasing resources in the
 * reverse order of qxl_device_init().
 *
 * NOTE(review): this version unrefs only release BOs [0] and [1] and does
 * not unpin them first, nor does it guard against a partially-failed init;
 * the v5.14 version below does all three — confirm whether those fixes are
 * needed at this kernel version.
 */
288void qxl_device_fini(struct qxl_device *qdev)
289{
290	qxl_bo_unref(&qdev->current_release_bo[0]);
291	qxl_bo_unref(&qdev->current_release_bo[1]);

	/* GEM/BO layers go first, then the pending GC work is drained before
	 * the rings it may touch are freed. */
292	qxl_gem_fini(qdev);
293	qxl_bo_fini(qdev);
294	flush_work(&qdev->gc_work);
295	qxl_ring_free(qdev->command_ring);
296	qxl_ring_free(qdev->cursor_ring);
297	qxl_ring_free(qdev->release_ring);
298	io_mapping_free(qdev->surface_mapping);
299	io_mapping_free(qdev->vram_mapping);
300	iounmap(qdev->ram_header);
301	iounmap(qdev->rom);
	/* Clear the stale pointer so later accesses fail loudly. */
302	qdev->rom = NULL;
303}
v5.14.15
  1/*
  2 * Copyright 2013 Red Hat Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Dave Airlie
 23 *          Alon Levy
 24 */
 25
 26#include <linux/io-mapping.h>
 27#include <linux/pci.h>
 28
 29#include <drm/drm_drv.h>
 30#include <drm/drm_managed.h>
 31#include <drm/drm_probe_helper.h>
 32
 33#include "qxl_drv.h"
 34#include "qxl_object.h"
 35
/* Driver-wide log verbosity knob; presumably read by the QXL_INFO/debug
 * macros in qxl_drv.h — TODO confirm against the header. Defaults to 0. */
  36int qxl_log_level;
  37
/*
 * Validate the device ROM header already mapped at qdev->rom: verify the
 * magic signature, log the ROM-advertised parameters, and record the
 * surface0 area size as qdev->vram_size.
 *
 * Returns true if the ROM looks valid, false on a bad signature.
 */
 38static bool qxl_check_device(struct qxl_device *qdev)
 39{
 40	struct qxl_rom *rom = qdev->rom;
 41
	/* 0x4f525851 is little-endian ASCII "QXRO" (the qxl ROM magic). */
 42	if (rom->magic != 0x4f525851) {
 43		DRM_ERROR("bad rom signature %x\n", rom->magic);
 44		return false;
 45	}
 46
 47	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
 48	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
 49		 rom->log_level);
 50	DRM_INFO("%d io pages at offset 0x%x\n",
 51		 rom->num_io_pages, rom->pages_offset);
 52	DRM_INFO("%d byte draw area at offset 0x%x\n",
 53		 rom->surface0_area_size, rom->draw_area_offset);
 54
	/* surface0 (the primary surface area) is what we expose as VRAM size. */
 55	qdev->vram_size = rom->surface0_area_size;
 56	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
 57	return true;
 58}
 59
/*
 * Program one memory slot into the device: publish the slot's physical
 * address range through the shared RAM header, then register it with the
 * device via qxl_io_memslot_add().  The header writes must precede the
 * io call, since the device reads mem_start/mem_end when the slot is added.
 */
 60static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
 61{
 62	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
 63	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
 64	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
 65}
 66
/*
 * Initialize the driver-side bookkeeping for a memory slot, register it
 * with the hardware, and precompute the slot's "high bits" — the slot id
 * and generation packed into the top bits of a 64-bit value, used when
 * the driver encodes guest physical addresses for the device.
 */
 67static void setup_slot(struct qxl_device *qdev,
 68		       struct qxl_memslot *slot,
 69		       unsigned int slot_index,
 70		       const char *slot_name,
 71		       unsigned long start_phys_addr,
 72		       unsigned long size)
 73{
 74	uint64_t high_bits;
 75
 76	slot->index = slot_index;
 77	slot->name = slot_name;
 78	slot->start_phys_addr = start_phys_addr;
 79	slot->size = size;
 80
	/* Registering the slot also latches the current slot_generation in
	 * the ROM, which we read back below. */
 81	setup_hw_slot(qdev, slot);
 82
 83	slot->generation = qdev->rom->slot_generation;
	/* Pack (slot id | generation) and shift the pair to the very top of
	 * the 64-bit word, leaving the low bits for the in-slot offset. */
 84	high_bits = (qdev->rom->slots_start + slot->index)
 85		<< qdev->rom->slot_gen_bits;
 86	high_bits |= slot->generation;
 87	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
 88	slot->high_bits = high_bits;
 89
 90	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx\n",
 91		 slot->index, slot->name,
 92		 (unsigned long)slot->start_phys_addr,
 93		 (unsigned long)slot->size);
 94}
 95
/*
 * Re-program both memory slots into the hardware using the parameters
 * already cached in qdev.  NOTE(review): presumably called after a device
 * reset (e.g. resume) when the device has forgotten its slots — confirm
 * against the callers.
 */
 96void qxl_reinit_memslots(struct qxl_device *qdev)
 97{
 98	setup_hw_slot(qdev, &qdev->main_slot);
 99	setup_hw_slot(qdev, &qdev->surfaces_slot);
100}
101
/* Workqueue callback: recover the owning qxl_device from the embedded
 * work_struct and run a garbage-collection pass over finished releases. */
102static void qxl_gc_work(struct work_struct *work)
103{
104	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
105
106	qxl_garbage_collect(qdev);
107}
108
/*
 * qxl_device_init() - bring up one qxl PCI device.
 *
 * Maps the VRAM, surface and ROM BARs, validates the ROM, initializes the
 * BO layer, creates the command/cursor/release rings inside the shared RAM
 * header, resets the device, wires up the IRQ and registers the two memory
 * slots.  Statement order matters throughout: each step depends on the
 * previous mapping/reset, and failures unwind in reverse via the goto
 * chain at the bottom.  gc_work is initialized last, which
 * qxl_device_fini() uses to detect a successful init.
 *
 * Returns 0 on success or a negative errno on failure.
 */
109int qxl_device_init(struct qxl_device *qdev,
110		    struct pci_dev *pdev)
111{
112	int r, sb;
113
114	pci_set_drvdata(pdev, &qdev->ddev);
115
116	mutex_init(&qdev->gem.mutex);
117	mutex_init(&qdev->update_area_mutex);
118	mutex_init(&qdev->release_mutex);
119	mutex_init(&qdev->surf_evict_mutex);
120	qxl_gem_init(qdev);
121
	/* BAR layout used here: 0 = VRAM, 2 = ROM, 3 = I/O ports,
	 * 4 (64-bit) or 1 (32-bit) = surface RAM. */
122	qdev->rom_base = pci_resource_start(pdev, 2);
123	qdev->rom_size = pci_resource_len(pdev, 2);
124	qdev->vram_base = pci_resource_start(pdev, 0);
125	qdev->io_base = pci_resource_start(pdev, 3);
126
127	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
128	if (!qdev->vram_mapping) {
129		pr_err("Unable to create vram_mapping");
130		return -ENOMEM;
131	}
132
133	if (pci_resource_len(pdev, 4) > 0) {
134		/* 64bit surface bar present */
135		sb = 4;
136		qdev->surfaceram_base = pci_resource_start(pdev, sb);
137		qdev->surfaceram_size = pci_resource_len(pdev, sb);
138		qdev->surface_mapping =
139			io_mapping_create_wc(qdev->surfaceram_base,
140					     qdev->surfaceram_size);
141	}
142	if (qdev->surface_mapping == NULL) {
143		/* 64bit surface bar not present (or mapping failed) */
144		sb = 1;
145		qdev->surfaceram_base = pci_resource_start(pdev, sb);
146		qdev->surfaceram_size = pci_resource_len(pdev, sb);
147		qdev->surface_mapping =
148			io_mapping_create_wc(qdev->surfaceram_base,
149					     qdev->surfaceram_size);
150		if (!qdev->surface_mapping) {
151			pr_err("Unable to create surface_mapping");
152			r = -ENOMEM;
153			goto vram_mapping_free;
154		}
155	}
156
157	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
158		 (unsigned long long)qdev->vram_base,
159		 (unsigned long long)pci_resource_end(pdev, 0),
160		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
161		 (int)pci_resource_len(pdev, 0) / 1024,
162		 (unsigned long long)qdev->surfaceram_base,
163		 (unsigned long long)pci_resource_end(pdev, sb),
164		 (int)qdev->surfaceram_size / 1024 / 1024,
165		 (int)qdev->surfaceram_size / 1024,
166		 (sb == 4) ? "64bit" : "32bit");
167
168	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
169	if (!qdev->rom) {
170		pr_err("Unable to ioremap ROM\n");
171		r = -ENOMEM;
172		goto surface_mapping_free;
173	}
174
	/* ROM must be mapped and sane before anything below touches
	 * rom->ram_header_offset or the rings. */
175	if (!qxl_check_device(qdev)) {
176		r = -ENODEV;
177		goto rom_unmap;
178	}
179
180	r = qxl_bo_init(qdev);
181	if (r) {
182		DRM_ERROR("bo init failed %d\n", r);
183		goto rom_unmap;
184	}
185
	/* The RAM header lives inside the VRAM BAR at the ROM-advertised
	 * offset; it holds the ring headers used below. */
186	qdev->ram_header = ioremap(qdev->vram_base +
187				   qdev->rom->ram_header_offset,
188				   sizeof(*qdev->ram_header));
189	if (!qdev->ram_header) {
190		DRM_ERROR("Unable to ioremap RAM header\n");
191		r = -ENOMEM;
192		goto bo_fini;
193	}
194
195	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
196					     sizeof(struct qxl_command),
197					     QXL_COMMAND_RING_SIZE,
198					     qdev->io_base + QXL_IO_NOTIFY_CMD,
199					     false,
200					     &qdev->display_event);
201	if (!qdev->command_ring) {
202		DRM_ERROR("Unable to create command ring\n");
203		r = -ENOMEM;
204		goto ram_header_unmap;
205	}
206
207	qdev->cursor_ring = qxl_ring_create(
208				&(qdev->ram_header->cursor_ring_hdr),
209				sizeof(struct qxl_command),
210				QXL_CURSOR_RING_SIZE,
211				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
212				false,
213				&qdev->cursor_event);
214
215	if (!qdev->cursor_ring) {
216		DRM_ERROR("Unable to create cursor ring\n");
217		r = -ENOMEM;
218		goto command_ring_free;
219	}
220
	/* Release ring has no notify port (0) and is marked set_prod_notify. */
221	qdev->release_ring = qxl_ring_create(
222				&(qdev->ram_header->release_ring_hdr),
223				sizeof(uint64_t),
224				QXL_RELEASE_RING_SIZE, 0, true,
225				NULL);
226
227	if (!qdev->release_ring) {
228		DRM_ERROR("Unable to create release ring\n");
229		r = -ENOMEM;
230		goto cursor_ring_free;
231	}
232
	/* Base of 1: id 0 is never handed out, so 0 can act as "no id". */
233	idr_init_base(&qdev->release_idr, 1);
234	spin_lock_init(&qdev->release_idr_lock);
235	spin_lock_init(&qdev->release_lock);
236
237	idr_init_base(&qdev->surf_id_idr, 1);
238	spin_lock_init(&qdev->surf_id_idr_lock);
239
240	mutex_init(&qdev->async_io_mutex);
241
242	/* reset the device into a known state - no memslots, no primary
243	 * created, no surfaces. */
244	qxl_io_reset(qdev);
245
246	/* must initialize irq before first async io - slot creation */
247	r = qxl_irq_init(qdev);
248	if (r) {
249		DRM_ERROR("Unable to init qxl irq\n");
250		goto release_ring_free;
251	}
252
253	/*
254	 * Note that virtual is surface0. We rely on the single ioremap done
255	 * before.
256	 */
257	setup_slot(qdev, &qdev->main_slot, 0, "main",
258		   (unsigned long)qdev->vram_base,
259		   (unsigned long)qdev->rom->ram_header_offset);
260	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
261		   (unsigned long)qdev->surfaceram_base,
262		   (unsigned long)qdev->surfaceram_size);
263
264	INIT_WORK(&qdev->gc_work, qxl_gc_work);
265
266	return 0;
267
	/* Error unwinding: strictly the reverse of the acquisition order. */
268release_ring_free:
269	qxl_ring_free(qdev->release_ring);
270cursor_ring_free:
271	qxl_ring_free(qdev->cursor_ring);
272command_ring_free:
273	qxl_ring_free(qdev->command_ring);
274ram_header_unmap:
275	iounmap(qdev->ram_header);
276bo_fini:
277	qxl_bo_fini(qdev);
278rom_unmap:
279	iounmap(qdev->rom);
280surface_mapping_free:
281	io_mapping_free(qdev->surface_mapping);
282vram_mapping_free:
283	io_mapping_free(qdev->vram_mapping);
284	return r;
285}
286
/*
 * qxl_device_fini() - tear down a qxl device, releasing resources in the
 * reverse order of qxl_device_init().  Safe to call after a failed init:
 * it bails out early if the last init step (INIT_WORK on gc_work) never ran.
 */
287void qxl_device_fini(struct qxl_device *qdev)
288{
289	int cur_idx;
290
291	/* check if qxl_device_init() was successful (gc_work is initialized last) */
292	if (!qdev->gc_work.func)
293		return;
294
	/* Unpin, unref and clear all three cached release BOs. */
295	for (cur_idx = 0; cur_idx < 3; cur_idx++) {
296		if (!qdev->current_release_bo[cur_idx])
297			continue;
298		qxl_bo_unpin(qdev->current_release_bo[cur_idx]);
299		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
300		qdev->current_release_bo_offset[cur_idx] = 0;
301		qdev->current_release_bo[cur_idx] = NULL;
302	}
303
304	/*
305	 * Ask host to release resources (+fill release ring),
306	 * then wait for the release actually happening.
307	 */
308	qxl_io_notify_oom(qdev);
	/* Wait at most one second (HZ jiffies) for outstanding releases. */
309	wait_event_timeout(qdev->release_event,
310			   atomic_read(&qdev->release_count) == 0,
311			   HZ);
	/* Drain any queued GC work before freeing the rings it may touch. */
312	flush_work(&qdev->gc_work);
313	qxl_surf_evict(qdev);
314	qxl_vram_evict(qdev);
315
316	qxl_gem_fini(qdev);
317	qxl_bo_fini(qdev);

318	qxl_ring_free(qdev->command_ring);
319	qxl_ring_free(qdev->cursor_ring);
320	qxl_ring_free(qdev->release_ring);
321	io_mapping_free(qdev->surface_mapping);
322	io_mapping_free(qdev->vram_mapping);
323	iounmap(qdev->ram_header);
324	iounmap(qdev->rom);
	/* Clear the stale pointer so later accesses fail loudly. */
325	qdev->rom = NULL;
326}