Linux kernel source listing: QXL virtual-GPU DRM driver device setup
(drivers/gpu/drm/qxl/qxl_kms.c). Two versions follow for comparison.

Version v4.6:
  1/*
  2 * Copyright 2013 Red Hat Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Dave Airlie
 23 *          Alon Levy
 24 */
 25
 
 
 
 
 
 
 26#include "qxl_drv.h"
 27#include "qxl_object.h"
 28
 29#include <drm/drm_crtc_helper.h>
 30#include <linux/io-mapping.h>
 31
 32int qxl_log_level;
 33
 34static void qxl_dump_mode(struct qxl_device *qdev, void *p)
 35{
 36	struct qxl_mode *m = p;
 37	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
 38		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
 39		      m->y_mili, m->orientation);
 40}
 41
 42static bool qxl_check_device(struct qxl_device *qdev)
 43{
 44	struct qxl_rom *rom = qdev->rom;
 45	int mode_offset;
 46	int i;
 47
 48	if (rom->magic != 0x4f525851) {
 49		DRM_ERROR("bad rom signature %x\n", rom->magic);
 50		return false;
 51	}
 52
 53	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
 54	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
 55		 rom->log_level);
 56	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
 57		 rom->mode, rom->modes_offset);
 58	DRM_INFO("%d io pages at offset 0x%x\n",
 59		 rom->num_io_pages, rom->pages_offset);
 60	DRM_INFO("%d byte draw area at offset 0x%x\n",
 61		 rom->surface0_area_size, rom->draw_area_offset);
 62
 63	qdev->vram_size = rom->surface0_area_size;
 64	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
 65
 66	mode_offset = rom->modes_offset / 4;
 67	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
 68	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
 69		 qdev->mode_info.num_modes);
 70	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
 71	for (i = 0; i < qdev->mode_info.num_modes; i++)
 72		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
 73	return true;
 74}
 75
 76static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
 77			  struct qxl_memslot *slot)
 78{
 79	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
 80	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
 81	qxl_io_memslot_add(qdev, slot_index);
 82}
 83
 84static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
 85	unsigned long start_phys_addr, unsigned long end_phys_addr)
 
 
 
 
 86{
 87	uint64_t high_bits;
 88	struct qxl_memslot *slot;
 89	uint8_t slot_index;
 90
 91	slot_index = qdev->rom->slots_start + slot_index_offset;
 92	slot = &qdev->mem_slots[slot_index];
 93	slot->start_phys_addr = start_phys_addr;
 94	slot->end_phys_addr = end_phys_addr;
 95
 96	setup_hw_slot(qdev, slot_index, slot);
 97
 98	slot->generation = qdev->rom->slot_generation;
 99	high_bits = slot_index << qdev->slot_gen_bits;
 
100	high_bits |= slot->generation;
101	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
102	slot->high_bits = high_bits;
103	return slot_index;
 
 
 
 
 
104}
105
106void qxl_reinit_memslots(struct qxl_device *qdev)
107{
108	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
109	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
110}
111
112static void qxl_gc_work(struct work_struct *work)
113{
114	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
 
115	qxl_garbage_collect(qdev);
116}
117
118static int qxl_device_init(struct qxl_device *qdev,
119		    struct drm_device *ddev,
120		    struct pci_dev *pdev,
121		    unsigned long flags)
122{
123	int r, sb;
124
125	qdev->dev = &pdev->dev;
126	qdev->ddev = ddev;
127	qdev->pdev = pdev;
128	qdev->flags = flags;
 
 
 
 
 
129
130	mutex_init(&qdev->gem.mutex);
131	mutex_init(&qdev->update_area_mutex);
132	mutex_init(&qdev->release_mutex);
133	mutex_init(&qdev->surf_evict_mutex);
134	INIT_LIST_HEAD(&qdev->gem.objects);
135
136	qdev->rom_base = pci_resource_start(pdev, 2);
137	qdev->rom_size = pci_resource_len(pdev, 2);
138	qdev->vram_base = pci_resource_start(pdev, 0);
139	qdev->io_base = pci_resource_start(pdev, 3);
140
141	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
 
 
 
 
 
142
143	if (pci_resource_len(pdev, 4) > 0) {
144		/* 64bit surface bar present */
145		sb = 4;
146		qdev->surfaceram_base = pci_resource_start(pdev, sb);
147		qdev->surfaceram_size = pci_resource_len(pdev, sb);
148		qdev->surface_mapping =
149			io_mapping_create_wc(qdev->surfaceram_base,
150					     qdev->surfaceram_size);
151	}
152	if (qdev->surface_mapping == NULL) {
153		/* 64bit surface bar not present (or mapping failed) */
154		sb = 1;
155		qdev->surfaceram_base = pci_resource_start(pdev, sb);
156		qdev->surfaceram_size = pci_resource_len(pdev, sb);
157		qdev->surface_mapping =
158			io_mapping_create_wc(qdev->surfaceram_base,
159					     qdev->surfaceram_size);
 
 
 
 
 
160	}
161
162	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
163		 (unsigned long long)qdev->vram_base,
164		 (unsigned long long)pci_resource_end(pdev, 0),
165		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
166		 (int)pci_resource_len(pdev, 0) / 1024,
167		 (unsigned long long)qdev->surfaceram_base,
168		 (unsigned long long)pci_resource_end(pdev, sb),
169		 (int)qdev->surfaceram_size / 1024 / 1024,
170		 (int)qdev->surfaceram_size / 1024,
171		 (sb == 4) ? "64bit" : "32bit");
172
173	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
174	if (!qdev->rom) {
175		pr_err("Unable to ioremap ROM\n");
176		return -ENOMEM;
 
177	}
178
179	qxl_check_device(qdev);
 
 
 
180
181	r = qxl_bo_init(qdev);
182	if (r) {
183		DRM_ERROR("bo init failed %d\n", r);
184		return r;
185	}
186
187	qdev->ram_header = ioremap(qdev->vram_base +
188				   qdev->rom->ram_header_offset,
189				   sizeof(*qdev->ram_header));
 
 
 
 
 
190
191	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
192					     sizeof(struct qxl_command),
193					     QXL_COMMAND_RING_SIZE,
194					     qdev->io_base + QXL_IO_NOTIFY_CMD,
195					     false,
196					     &qdev->display_event);
 
 
 
 
 
197
198	qdev->cursor_ring = qxl_ring_create(
199				&(qdev->ram_header->cursor_ring_hdr),
200				sizeof(struct qxl_command),
201				QXL_CURSOR_RING_SIZE,
202				qdev->io_base + QXL_IO_NOTIFY_CMD,
203				false,
204				&qdev->cursor_event);
205
 
 
 
 
 
 
206	qdev->release_ring = qxl_ring_create(
207				&(qdev->ram_header->release_ring_hdr),
208				sizeof(uint64_t),
209				QXL_RELEASE_RING_SIZE, 0, true,
210				NULL);
211
212	/* TODO - slot initialization should happen on reset. where is our
213	 * reset handler? */
214	qdev->n_mem_slots = qdev->rom->slots_end;
215	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
216	qdev->slot_id_bits = qdev->rom->slot_id_bits;
217	qdev->va_slot_mask =
218		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
219
220	qdev->mem_slots =
221		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
222			GFP_KERNEL);
223
224	idr_init(&qdev->release_idr);
225	spin_lock_init(&qdev->release_idr_lock);
226	spin_lock_init(&qdev->release_lock);
227
228	idr_init(&qdev->surf_id_idr);
229	spin_lock_init(&qdev->surf_id_idr_lock);
230
231	mutex_init(&qdev->async_io_mutex);
232
233	/* reset the device into a known state - no memslots, no primary
234	 * created, no surfaces. */
235	qxl_io_reset(qdev);
236
237	/* must initialize irq before first async io - slot creation */
238	r = qxl_irq_init(qdev);
239	if (r)
240		return r;
 
 
241
242	/*
243	 * Note that virtual is surface0. We rely on the single ioremap done
244	 * before.
245	 */
246	qdev->main_mem_slot = setup_slot(qdev, 0,
247		(unsigned long)qdev->vram_base,
248		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
249	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
250		(unsigned long)qdev->surfaceram_base,
251		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
252	DRM_INFO("main mem slot %d [%lx,%x]\n",
253		 qdev->main_mem_slot,
254		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
255	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
256		 qdev->surfaces_mem_slot,
257		 (unsigned long)qdev->surfaceram_base,
258		 (unsigned long)qdev->surfaceram_size);
259
260
261	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
262	INIT_WORK(&qdev->gc_work, qxl_gc_work);
263
264	r = qxl_fb_init(qdev);
265	if (r)
266		return r;
267
268	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269}
270
271static void qxl_device_fini(struct qxl_device *qdev)
272{
273	if (qdev->current_release_bo[0])
274		qxl_bo_unref(&qdev->current_release_bo[0]);
275	if (qdev->current_release_bo[1])
276		qxl_bo_unref(&qdev->current_release_bo[1]);
277	flush_workqueue(qdev->gc_queue);
278	destroy_workqueue(qdev->gc_queue);
279	qdev->gc_queue = NULL;
280
281	qxl_ring_free(qdev->command_ring);
282	qxl_ring_free(qdev->cursor_ring);
283	qxl_ring_free(qdev->release_ring);
 
284	qxl_bo_fini(qdev);
285	io_mapping_free(qdev->surface_mapping);
286	io_mapping_free(qdev->vram_mapping);
287	iounmap(qdev->ram_header);
288	iounmap(qdev->rom);
289	qdev->rom = NULL;
290	qdev->mode_info.modes = NULL;
291	qdev->mode_info.num_modes = 0;
292	qxl_debugfs_remove_files(qdev);
293}
294
295int qxl_driver_unload(struct drm_device *dev)
296{
297	struct qxl_device *qdev = dev->dev_private;
298
299	if (qdev == NULL)
300		return 0;
301
302	drm_vblank_cleanup(dev);
303
304	qxl_modeset_fini(qdev);
305	qxl_device_fini(qdev);
306
307	kfree(qdev);
308	dev->dev_private = NULL;
309	return 0;
310}
311
312int qxl_driver_load(struct drm_device *dev, unsigned long flags)
313{
314	struct qxl_device *qdev;
315	int r;
316
317	/* require kms */
318	if (!drm_core_check_feature(dev, DRIVER_MODESET))
319		return -ENODEV;
320
321	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
322	if (qdev == NULL)
323		return -ENOMEM;
324
325	dev->dev_private = qdev;
326
327	r = qxl_device_init(qdev, dev, dev->pdev, flags);
328	if (r)
329		goto out;
330
331	r = drm_vblank_init(dev, 1);
332	if (r)
333		goto unload;
334
335	r = qxl_modeset_init(qdev);
336	if (r)
337		goto unload;
338
339	drm_kms_helper_poll_init(qdev->ddev);
340
341	return 0;
342unload:
343	qxl_driver_unload(dev);
344
345out:
346	kfree(qdev);
347	return r;
348}
349
350
Version v5.4 of the same file:
  1/*
  2 * Copyright 2013 Red Hat Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Dave Airlie
 23 *          Alon Levy
 24 */
 25
 26#include <linux/io-mapping.h>
 27#include <linux/pci.h>
 28
 29#include <drm/drm_drv.h>
 30#include <drm/drm_probe_helper.h>
 31
 32#include "qxl_drv.h"
 33#include "qxl_object.h"
 34
 
 
 
 35int qxl_log_level;
 36
 
 
 
 
 
 
 
 
 37static bool qxl_check_device(struct qxl_device *qdev)
 38{
 39	struct qxl_rom *rom = qdev->rom;
 
 
 40
 41	if (rom->magic != 0x4f525851) {
 42		DRM_ERROR("bad rom signature %x\n", rom->magic);
 43		return false;
 44	}
 45
 46	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
 47	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
 48		 rom->log_level);
 
 
 49	DRM_INFO("%d io pages at offset 0x%x\n",
 50		 rom->num_io_pages, rom->pages_offset);
 51	DRM_INFO("%d byte draw area at offset 0x%x\n",
 52		 rom->surface0_area_size, rom->draw_area_offset);
 53
 54	qdev->vram_size = rom->surface0_area_size;
 55	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
 
 
 
 
 
 
 
 
 56	return true;
 57}
 58
 59static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
 
 60{
 61	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
 62	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
 63	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
 64}
 65
 66static void setup_slot(struct qxl_device *qdev,
 67		       struct qxl_memslot *slot,
 68		       unsigned int slot_index,
 69		       const char *slot_name,
 70		       unsigned long start_phys_addr,
 71		       unsigned long size)
 72{
 73	uint64_t high_bits;
 
 
 74
 75	slot->index = slot_index;
 76	slot->name = slot_name;
 77	slot->start_phys_addr = start_phys_addr;
 78	slot->size = size;
 79
 80	setup_hw_slot(qdev, slot);
 81
 82	slot->generation = qdev->rom->slot_generation;
 83	high_bits = (qdev->rom->slots_start + slot->index)
 84		<< qdev->rom->slot_gen_bits;
 85	high_bits |= slot->generation;
 86	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
 87	slot->high_bits = high_bits;
 88
 89	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx, gpu_offset 0x%lx\n",
 90		 slot->index, slot->name,
 91		 (unsigned long)slot->start_phys_addr,
 92		 (unsigned long)slot->size,
 93		 (unsigned long)slot->gpu_offset);
 94}
 95
/*
 * Re-program both memory slots into the device hardware.
 * NOTE(review): presumably called after a device reset/resume, when the
 * hardware slot table needs re-registering — confirm against callers.
 */
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, &qdev->main_slot);
	setup_hw_slot(qdev, &qdev->surfaces_slot);
}
101
102static void qxl_gc_work(struct work_struct *work)
103{
104	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
105
106	qxl_garbage_collect(qdev);
107}
108
109int qxl_device_init(struct qxl_device *qdev,
110		    struct drm_driver *drv,
111		    struct pci_dev *pdev)
 
112{
113	int r, sb;
114
115	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
116	if (r) {
117		pr_err("Unable to init drm dev");
118		goto error;
119	}
120
121	qdev->ddev.pdev = pdev;
122	pci_set_drvdata(pdev, &qdev->ddev);
123	qdev->ddev.dev_private = qdev;
124
125	mutex_init(&qdev->gem.mutex);
126	mutex_init(&qdev->update_area_mutex);
127	mutex_init(&qdev->release_mutex);
128	mutex_init(&qdev->surf_evict_mutex);
129	qxl_gem_init(qdev);
130
131	qdev->rom_base = pci_resource_start(pdev, 2);
132	qdev->rom_size = pci_resource_len(pdev, 2);
133	qdev->vram_base = pci_resource_start(pdev, 0);
134	qdev->io_base = pci_resource_start(pdev, 3);
135
136	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
137	if (!qdev->vram_mapping) {
138		pr_err("Unable to create vram_mapping");
139		r = -ENOMEM;
140		goto error;
141	}
142
143	if (pci_resource_len(pdev, 4) > 0) {
144		/* 64bit surface bar present */
145		sb = 4;
146		qdev->surfaceram_base = pci_resource_start(pdev, sb);
147		qdev->surfaceram_size = pci_resource_len(pdev, sb);
148		qdev->surface_mapping =
149			io_mapping_create_wc(qdev->surfaceram_base,
150					     qdev->surfaceram_size);
151	}
152	if (qdev->surface_mapping == NULL) {
153		/* 64bit surface bar not present (or mapping failed) */
154		sb = 1;
155		qdev->surfaceram_base = pci_resource_start(pdev, sb);
156		qdev->surfaceram_size = pci_resource_len(pdev, sb);
157		qdev->surface_mapping =
158			io_mapping_create_wc(qdev->surfaceram_base,
159					     qdev->surfaceram_size);
160		if (!qdev->surface_mapping) {
161			pr_err("Unable to create surface_mapping");
162			r = -ENOMEM;
163			goto vram_mapping_free;
164		}
165	}
166
167	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
168		 (unsigned long long)qdev->vram_base,
169		 (unsigned long long)pci_resource_end(pdev, 0),
170		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
171		 (int)pci_resource_len(pdev, 0) / 1024,
172		 (unsigned long long)qdev->surfaceram_base,
173		 (unsigned long long)pci_resource_end(pdev, sb),
174		 (int)qdev->surfaceram_size / 1024 / 1024,
175		 (int)qdev->surfaceram_size / 1024,
176		 (sb == 4) ? "64bit" : "32bit");
177
178	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
179	if (!qdev->rom) {
180		pr_err("Unable to ioremap ROM\n");
181		r = -ENOMEM;
182		goto surface_mapping_free;
183	}
184
185	if (!qxl_check_device(qdev)) {
186		r = -ENODEV;
187		goto surface_mapping_free;
188	}
189
190	r = qxl_bo_init(qdev);
191	if (r) {
192		DRM_ERROR("bo init failed %d\n", r);
193		goto rom_unmap;
194	}
195
196	qdev->ram_header = ioremap(qdev->vram_base +
197				   qdev->rom->ram_header_offset,
198				   sizeof(*qdev->ram_header));
199	if (!qdev->ram_header) {
200		DRM_ERROR("Unable to ioremap RAM header\n");
201		r = -ENOMEM;
202		goto bo_fini;
203	}
204
205	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
206					     sizeof(struct qxl_command),
207					     QXL_COMMAND_RING_SIZE,
208					     qdev->io_base + QXL_IO_NOTIFY_CMD,
209					     false,
210					     &qdev->display_event);
211	if (!qdev->command_ring) {
212		DRM_ERROR("Unable to create command ring\n");
213		r = -ENOMEM;
214		goto ram_header_unmap;
215	}
216
217	qdev->cursor_ring = qxl_ring_create(
218				&(qdev->ram_header->cursor_ring_hdr),
219				sizeof(struct qxl_command),
220				QXL_CURSOR_RING_SIZE,
221				qdev->io_base + QXL_IO_NOTIFY_CMD,
222				false,
223				&qdev->cursor_event);
224
225	if (!qdev->cursor_ring) {
226		DRM_ERROR("Unable to create cursor ring\n");
227		r = -ENOMEM;
228		goto command_ring_free;
229	}
230
231	qdev->release_ring = qxl_ring_create(
232				&(qdev->ram_header->release_ring_hdr),
233				sizeof(uint64_t),
234				QXL_RELEASE_RING_SIZE, 0, true,
235				NULL);
236
237	if (!qdev->release_ring) {
238		DRM_ERROR("Unable to create release ring\n");
239		r = -ENOMEM;
240		goto cursor_ring_free;
241	}
 
 
 
 
 
 
242
243	idr_init(&qdev->release_idr);
244	spin_lock_init(&qdev->release_idr_lock);
245	spin_lock_init(&qdev->release_lock);
246
247	idr_init(&qdev->surf_id_idr);
248	spin_lock_init(&qdev->surf_id_idr_lock);
249
250	mutex_init(&qdev->async_io_mutex);
251
252	/* reset the device into a known state - no memslots, no primary
253	 * created, no surfaces. */
254	qxl_io_reset(qdev);
255
256	/* must initialize irq before first async io - slot creation */
257	r = qxl_irq_init(qdev);
258	if (r) {
259		DRM_ERROR("Unable to init qxl irq\n");
260		goto release_ring_free;
261	}
262
263	/*
264	 * Note that virtual is surface0. We rely on the single ioremap done
265	 * before.
266	 */
267	setup_slot(qdev, &qdev->main_slot, 0, "main",
268		   (unsigned long)qdev->vram_base,
269		   (unsigned long)qdev->rom->ram_header_offset);
270	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
271		   (unsigned long)qdev->surfaceram_base,
272		   (unsigned long)qdev->surfaceram_size);
 
 
 
 
 
 
 
273
 
 
274	INIT_WORK(&qdev->gc_work, qxl_gc_work);
275
 
 
 
 
276	return 0;
277
278release_ring_free:
279	qxl_ring_free(qdev->release_ring);
280cursor_ring_free:
281	qxl_ring_free(qdev->cursor_ring);
282command_ring_free:
283	qxl_ring_free(qdev->command_ring);
284ram_header_unmap:
285	iounmap(qdev->ram_header);
286bo_fini:
287	qxl_bo_fini(qdev);
288rom_unmap:
289	iounmap(qdev->rom);
290surface_mapping_free:
291	io_mapping_free(qdev->surface_mapping);
292vram_mapping_free:
293	io_mapping_free(qdev->vram_mapping);
294error:
295	return r;
296}
297
298void qxl_device_fini(struct qxl_device *qdev)
299{
300	qxl_bo_unref(&qdev->current_release_bo[0]);
301	qxl_bo_unref(&qdev->current_release_bo[1]);
302	flush_work(&qdev->gc_work);
 
 
 
 
 
303	qxl_ring_free(qdev->command_ring);
304	qxl_ring_free(qdev->cursor_ring);
305	qxl_ring_free(qdev->release_ring);
306	qxl_gem_fini(qdev);
307	qxl_bo_fini(qdev);
308	io_mapping_free(qdev->surface_mapping);
309	io_mapping_free(qdev->vram_mapping);
310	iounmap(qdev->ram_header);
311	iounmap(qdev->rom);
312	qdev->rom = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
313}