drivers/gpu/drm/radeon/radeon_cs.c @ v3.1
  1/*
  2 * Copyright 2008 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the "Software"),
  7 * to deal in the Software without restriction, including without limitation
  8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9 * and/or sell copies of the Software, and to permit persons to whom the
 10 * Software is furnished to do so, subject to the following conditions:
 11 *
 12 * The above copyright notice and this permission notice (including the next
 13 * paragraph) shall be included in all copies or substantial portions of the
 14 * Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 22 * DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors:
 25 *    Jerome Glisse <glisse@freedesktop.org>
 26 */
 27#include "drmP.h"
 28#include "radeon_drm.h"
 29#include "radeon_reg.h"
 30#include "radeon.h"
 31
 32void r100_cs_dump_packet(struct radeon_cs_parser *p,
 33			 struct radeon_cs_packet *pkt);
 34
 35int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 36{
 37	struct drm_device *ddev = p->rdev->ddev;
 38	struct radeon_cs_chunk *chunk;
 39	unsigned i, j;
 40	bool duplicate;
 41
 42	if (p->chunk_relocs_idx == -1) {
 43		return 0;
 44	}
 45	chunk = &p->chunks[p->chunk_relocs_idx];
 46	/* FIXME: we assume that each reloc uses 4 dwords */
 47	p->nrelocs = chunk->length_dw / 4;
 48	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
 49	if (p->relocs_ptr == NULL) {
 50		return -ENOMEM;
 51	}
 52	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
 53	if (p->relocs == NULL) {
 54		return -ENOMEM;
 55	}
 56	for (i = 0; i < p->nrelocs; i++) {
 57		struct drm_radeon_cs_reloc *r;
 58
 59		duplicate = false;
 60		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
 61		for (j = 0; j < p->nrelocs; j++) {
 62			if (r->handle == p->relocs[j].handle) {
 63				p->relocs_ptr[i] = &p->relocs[j];
 64				duplicate = true;
 65				break;
 66			}
 67		}
 68		if (!duplicate) {
 69			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
 70								  p->filp,
 71								  r->handle);
 72			if (p->relocs[i].gobj == NULL) {
 73				DRM_ERROR("gem object lookup failed 0x%x\n",
 74					  r->handle);
 75				return -ENOENT;
 76			}
 77			p->relocs_ptr[i] = &p->relocs[i];
 78			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
 79			p->relocs[i].lobj.bo = p->relocs[i].robj;
 80			p->relocs[i].lobj.wdomain = r->write_domain;
 81			p->relocs[i].lobj.rdomain = r->read_domains;
 82			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
 83			p->relocs[i].handle = r->handle;
 84			p->relocs[i].flags = r->flags;
 85			radeon_bo_list_add_object(&p->relocs[i].lobj,
 86						  &p->validated);
 87		}
 88	}
 89	return radeon_bo_list_validate(&p->validated);
 90}
 91
 92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 93{
 94	struct drm_radeon_cs *cs = data;
 95	uint64_t *chunk_array_ptr;
 96	unsigned size, i;
 97
 98	if (!cs->num_chunks) {
 99		return 0;
100	}
101	/* get chunks */
102	INIT_LIST_HEAD(&p->validated);
103	p->idx = 0;
104	p->chunk_ib_idx = -1;
105	p->chunk_relocs_idx = -1;
106	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
107	if (p->chunks_array == NULL) {
108		return -ENOMEM;
109	}
110	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
111	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
112			       sizeof(uint64_t)*cs->num_chunks)) {
113		return -EFAULT;
114	}
115	p->nchunks = cs->num_chunks;
116	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
117	if (p->chunks == NULL) {
118		return -ENOMEM;
119	}
120	for (i = 0; i < p->nchunks; i++) {
121		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
122		struct drm_radeon_cs_chunk user_chunk;
123		uint32_t __user *cdata;
124
125		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
126		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
127				       sizeof(struct drm_radeon_cs_chunk))) {
128			return -EFAULT;
129		}
130		p->chunks[i].length_dw = user_chunk.length_dw;
131		p->chunks[i].kdata = NULL;
132		p->chunks[i].chunk_id = user_chunk.chunk_id;
133
134		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
135			p->chunk_relocs_idx = i;
136		}
137		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
138			p->chunk_ib_idx = i;
139			/* zero length IB isn't useful */
140			if (p->chunks[i].length_dw == 0)
141				return -EINVAL;
142		}
143
144		p->chunks[i].length_dw = user_chunk.length_dw;
145		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
146
147		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
148		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
149			size = p->chunks[i].length_dw * sizeof(uint32_t);
150			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
151			if (p->chunks[i].kdata == NULL) {
152				return -ENOMEM;
153			}
154			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
155					       p->chunks[i].user_ptr, size)) {
156				return -EFAULT;
157			}
158		} else {
159			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
161			if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
162				kfree(p->chunks[i].kpage[0]);
163				kfree(p->chunks[i].kpage[1]);
164				return -ENOMEM;
165			}
166			p->chunks[i].kpage_idx[0] = -1;
167			p->chunks[i].kpage_idx[1] = -1;
168			p->chunks[i].last_copied_page = -1;
169			p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
170		}
171	}
172	if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
173		DRM_ERROR("cs IB too big: %d\n",
174			  p->chunks[p->chunk_ib_idx].length_dw);
175		return -EINVAL;
176	}
177	return 0;
178}
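
The chunk indirection that radeon_cs_parser_init() unpacks above is easiest to see from the submitting side: cs->chunks is a user pointer to an array of uint64_t, and each array element is itself a user pointer to one struct drm_radeon_cs_chunk. A minimal userspace sketch, assuming libdrm's drmCommandWriteRead() and the radeon UAPI header (the helper name is hypothetical and error handling is omitted):

#include <stdint.h>
#include <string.h>

#include <xf86drm.h>
#include <drm/radeon_drm.h>

static int submit_cs(int fd, uint32_t *ib, unsigned ib_ndw,
		     uint32_t *relocs, unsigned nrelocs)
{
	struct drm_radeon_cs_chunk chunks[2];
	uint64_t chunk_array[2];
	struct drm_radeon_cs cs;

	/* chunk 0: the command buffer itself */
	chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
	chunks[0].length_dw = ib_ndw;
	chunks[0].chunk_data = (uintptr_t)ib;

	/* chunk 1: relocations; the parser above assumes 4 dwords per entry */
	chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
	chunks[1].length_dw = nrelocs * 4;
	chunks[1].chunk_data = (uintptr_t)relocs;

	/* cs.chunks points to an array of user pointers, one per chunk */
	chunk_array[0] = (uintptr_t)&chunks[0];
	chunk_array[1] = (uintptr_t)&chunks[1];

	memset(&cs, 0, sizeof(cs));
	cs.num_chunks = 2;
	cs.chunks = (uintptr_t)chunk_array;

	return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
}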
179
180/**
181 * radeon_cs_parser_fini() - clean parser states
182 * @parser:	parser structure holding parsing context.
183 * @error:	error number
184 *
185 * If error is set then unvalidate buffers, otherwise just free memory
186 * used by parsing context.
187 **/
188static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{
190	unsigned i;
191
192
193	if (!error && parser->ib)
194		ttm_eu_fence_buffer_objects(&parser->validated,
195					    parser->ib->fence);
196	else
197		ttm_eu_backoff_reservation(&parser->validated);
198
199	if (parser->relocs != NULL) {
200		for (i = 0; i < parser->nrelocs; i++) {
201			if (parser->relocs[i].gobj)
202				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
203		}
204	}
205	kfree(parser->track);
206	kfree(parser->relocs);
207	kfree(parser->relocs_ptr);
208	for (i = 0; i < parser->nchunks; i++) {
209		kfree(parser->chunks[i].kdata);
210		kfree(parser->chunks[i].kpage[0]);
211		kfree(parser->chunks[i].kpage[1]);
212	}
213	kfree(parser->chunks);
214	kfree(parser->chunks_array);
215	radeon_ib_free(parser->rdev, &parser->ib);
216}
217
218int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
219{
220	struct radeon_device *rdev = dev->dev_private;
221	struct radeon_cs_parser parser;
222	struct radeon_cs_chunk *ib_chunk;
223	int r;
224
225	mutex_lock(&rdev->cs_mutex);
226	/* initialize parser */
227	memset(&parser, 0, sizeof(struct radeon_cs_parser));
228	parser.filp = filp;
229	parser.rdev = rdev;
230	parser.dev = rdev->dev;
231	parser.family = rdev->family;
232	r = radeon_cs_parser_init(&parser, data);
233	if (r) {
234		DRM_ERROR("Failed to initialize parser !\n");
235		radeon_cs_parser_fini(&parser, r);
236		mutex_unlock(&rdev->cs_mutex);
237		return r;
238	}
239	r =  radeon_ib_get(rdev, &parser.ib);
240	if (r) {
241		DRM_ERROR("Failed to get ib !\n");
242		radeon_cs_parser_fini(&parser, r);
243		mutex_unlock(&rdev->cs_mutex);
244		return r;
245	}
246	r = radeon_cs_parser_relocs(&parser);
247	if (r) {
248		if (r != -ERESTARTSYS)
249			DRM_ERROR("Failed to parse relocation %d!\n", r);
250		radeon_cs_parser_fini(&parser, r);
251		mutex_unlock(&rdev->cs_mutex);
252		return r;
253	}
254	/* Copy the packet into the IB, the parser will read from the
255	 * input memory (cached) and write to the IB (which can be
256	 * uncached). */
257	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
258	parser.ib->length_dw = ib_chunk->length_dw;
259	r = radeon_cs_parse(&parser);
260	if (r || parser.parser_error) {
261		DRM_ERROR("Invalid command stream !\n");
262		radeon_cs_parser_fini(&parser, r);
263		mutex_unlock(&rdev->cs_mutex);
264		return r;
265	}
266	r = radeon_cs_finish_pages(&parser);
267	if (r) {
268		DRM_ERROR("Invalid command stream !\n");
269		radeon_cs_parser_fini(&parser, r);
270		mutex_unlock(&rdev->cs_mutex);
271		return r;
272	}
273	r = radeon_ib_schedule(rdev, parser.ib);
274	if (r) {
275		DRM_ERROR("Failed to schedule IB !\n");
276	}
277	radeon_cs_parser_fini(&parser, r);
278	mutex_unlock(&rdev->cs_mutex);
279	return r;
280}
281
282int radeon_cs_finish_pages(struct radeon_cs_parser *p)
283{
284	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
285	int i;
286	int size = PAGE_SIZE;
287
288	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
289		if (i == ibc->last_page_index) {
290			size = (ibc->length_dw * 4) % PAGE_SIZE;
291			if (size == 0)
292				size = PAGE_SIZE;
293		}
294
295		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
296				       ibc->user_ptr + (i * PAGE_SIZE),
297				       size))
298			return -EFAULT;
299	}
300	return 0;
301}
302
303int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
304{
305	int new_page;
306	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
307	int i;
308	int size = PAGE_SIZE;
309
310	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
311		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
312				       ibc->user_ptr + (i * PAGE_SIZE),
313				       PAGE_SIZE)) {
314			p->parser_error = -EFAULT;
315			return 0;
316		}
317	}
318
319	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
320
321	if (pg_idx == ibc->last_page_index) {
322		size = (ibc->length_dw * 4) % PAGE_SIZE;
323		if (size == 0)
324			size = PAGE_SIZE;
325	}
326
327	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
328			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
329			       size)) {
330		p->parser_error = -EFAULT;
331		return 0;
332	}
333
334	/* copy to IB here */
335	memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
336
337	ibc->last_copied_page = pg_idx;
338	ibc->kpage_idx[new_page] = pg_idx;
339
340	return new_page;
341}
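
radeon_cs_finish_pages() and radeon_cs_update_pages() above stream a large IB chunk through only two kernel pages: kpage[0] and kpage[1] hold the two most recently copied user pages, and on a miss the slot caching the older page (the smaller kpage_idx) is recycled, exactly the ternary on line 319. A standalone userspace sketch of that replacement rule, with hypothetical page indices:

#include <stdio.h>

int main(void)
{
	int kpage_idx[2] = { -1, -1 };		/* mirrors ibc->kpage_idx[] */
	int faults[] = { 0, 1, 2, 1, 7 };	/* page indices in parse order */
	unsigned i;

	for (i = 0; i < sizeof(faults) / sizeof(faults[0]); i++) {
		/* same rule as line 319 above: recycle the older slot */
		int new_page = kpage_idx[0] < kpage_idx[1] ? 0 : 1;

		kpage_idx[new_page] = faults[i];
		printf("page %d -> slot %d (cached: %d and %d)\n",
		       faults[i], new_page, kpage_idx[0], kpage_idx[1]);
	}
	return 0;
}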
drivers/gpu/drm/radeon/radeon_cs.c @ v6.2
  1/*
  2 * Copyright 2008 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the "Software"),
  7 * to deal in the Software without restriction, including without limitation
  8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9 * and/or sell copies of the Software, and to permit persons to whom the
 10 * Software is furnished to do so, subject to the following conditions:
 11 *
 12 * The above copyright notice and this permission notice (including the next
 13 * paragraph) shall be included in all copies or substantial portions of the
 14 * Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 22 * DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors:
 25 *    Jerome Glisse <glisse@freedesktop.org>
 26 */
 27
 28#include <linux/list_sort.h>
 29#include <linux/pci.h>
 30#include <linux/uaccess.h>
 31
 32#include <drm/drm_device.h>
 33#include <drm/drm_file.h>
 34#include <drm/radeon_drm.h>
 35
 36#include "radeon.h"
 37#include "radeon_reg.h"
 38#include "radeon_trace.h"
 39
 40#define RADEON_CS_MAX_PRIORITY		32u
 41#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
 42
 43/* This is based on the bucket sort with O(n) time complexity.
 44 * An item with priority "i" is added to bucket[i]. The lists are then
 45 * concatenated in descending order.
 46 */
 47struct radeon_cs_buckets {
 48	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
 49};
 50
 51static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
 52{
 53	unsigned i;
 54
 55	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
 56		INIT_LIST_HEAD(&b->bucket[i]);
 57}
 58
 59static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
 60				  struct list_head *item, unsigned priority)
 61{
 62	/* Since buffers which appear sooner in the relocation list are
 63	 * likely to be used more often than buffers which appear later
 64	 * in the list, the sort mustn't change the ordering of buffers
 65	 * with the same priority, i.e. it must be stable.
 66	 */
 67	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
 68}
 69
 70static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
 71				       struct list_head *out_list)
 72{
 73	unsigned i;
 74
 75	/* Connect the sorted buckets in the output list. */
 76	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
 77		list_splice(&b->bucket[i], out_list);
 78	}
 79}
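
The helpers above implement a stable O(n) bucket sort: each relocation is appended to bucket[priority], and radeon_cs_buckets_get_list() then splices the buckets so higher priorities come out first. A self-contained userspace sketch of the same idea, using a hypothetical item list and four priority levels instead of the driver's 33:

#include <stdio.h>

#define MAX_PRIORITY	3
#define NUM_BUCKETS	(MAX_PRIORITY + 1)

struct item {
	int id;
	unsigned priority;
	struct item *next;
};

int main(void)
{
	struct item items[] = {
		{ 0, 1, NULL }, { 1, 3, NULL }, { 2, 1, NULL }, { 3, 0, NULL },
	};
	struct item *head[NUM_BUCKETS] = { NULL };
	struct item **tail[NUM_BUCKETS];
	struct item *it;
	unsigned i;
	int b;

	for (b = 0; b < NUM_BUCKETS; b++)
		tail[b] = &head[b];

	/* appending keeps equal-priority items in submission order (stable) */
	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		*tail[items[i].priority] = &items[i];
		tail[items[i].priority] = &items[i].next;
	}

	/* walk the buckets in descending priority, as the splice above does */
	for (b = MAX_PRIORITY; b >= 0; b--)
		for (it = head[b]; it; it = it->next)
			printf("id %d (priority %u)\n", it->id, it->priority);
	return 0;
}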
 80
 81static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 82{
 83	struct radeon_cs_chunk *chunk;
 84	struct radeon_cs_buckets buckets;
 85	unsigned i;
 86	bool need_mmap_lock = false;
 87	int r;
 88
 89	if (p->chunk_relocs == NULL) {
 90		return 0;
 91	}
 92	chunk = p->chunk_relocs;
 93	p->dma_reloc_idx = 0;
 94	/* FIXME: we assume that each reloc uses 4 dwords */
 95	p->nrelocs = chunk->length_dw / 4;
 96	p->relocs = kvcalloc(p->nrelocs, sizeof(struct radeon_bo_list),
 97			GFP_KERNEL);
 98	if (p->relocs == NULL) {
 99		return -ENOMEM;
100	}
101
102	radeon_cs_buckets_init(&buckets);
103
104	for (i = 0; i < p->nrelocs; i++) {
105		struct drm_radeon_cs_reloc *r;
106		struct drm_gem_object *gobj;
107		unsigned priority;
108
109		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
110		gobj = drm_gem_object_lookup(p->filp, r->handle);
111		if (gobj == NULL) {
112			DRM_ERROR("gem object lookup failed 0x%x\n",
113				  r->handle);
114			return -ENOENT;
115		}
116		p->relocs[i].robj = gem_to_radeon_bo(gobj);
117
118		/* The userspace buffer priorities are from 0 to 15. A higher
119		 * number means the buffer is more important.
120		 * Also, the buffers used for write have a higher priority than
121		 * the buffers used for read only, which doubles the range
122		 * to 0 to 31. 32 is reserved for the kernel driver.
123		 */
124		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
125			   + !!r->write_domain;
126
127		/* The first reloc of an UVD job is the msg and that must be in
128		 * VRAM, the second reloc is the DPB and for WMV that must be in
129		 * VRAM as well. Also put everything into VRAM on AGP cards and older
130		 * IGP chips to avoid image corruptions
131		 */
132		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
133		    (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
134		     p->rdev->family == CHIP_RS780 ||
135		     p->rdev->family == CHIP_RS880)) {
136
137			/* TODO: is this still needed for NI+ ? */
138			p->relocs[i].preferred_domains =
139				RADEON_GEM_DOMAIN_VRAM;
140
141			p->relocs[i].allowed_domains =
142				RADEON_GEM_DOMAIN_VRAM;
143
144			/* prioritize this over any other relocation */
145			priority = RADEON_CS_MAX_PRIORITY;
146		} else {
147			uint32_t domain = r->write_domain ?
148				r->write_domain : r->read_domains;
149
150			if (domain & RADEON_GEM_DOMAIN_CPU) {
151				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
152					  "for command submission\n");
153				return -EINVAL;
154			}
155
156			p->relocs[i].preferred_domains = domain;
157			if (domain == RADEON_GEM_DOMAIN_VRAM)
158				domain |= RADEON_GEM_DOMAIN_GTT;
159			p->relocs[i].allowed_domains = domain;
160		}
161
162		if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) {
163			uint32_t domain = p->relocs[i].preferred_domains;
164			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
165				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
166					  "allowed for userptr BOs\n");
167				return -EINVAL;
168			}
169			need_mmap_lock = true;
170			domain = RADEON_GEM_DOMAIN_GTT;
171			p->relocs[i].preferred_domains = domain;
172			p->relocs[i].allowed_domains = domain;
173		}
174
175		/* Objects shared as dma-bufs cannot be moved to VRAM */
176		if (p->relocs[i].robj->prime_shared_count) {
177			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
178			if (!p->relocs[i].allowed_domains) {
179				DRM_ERROR("BO associated with dma-buf cannot "
180					  "be moved to VRAM\n");
181				return -EINVAL;
182			}
183		}
184
185		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
186		p->relocs[i].tv.num_shared = !r->write_domain;
187
188		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
189				      priority);
190	}
191
192	radeon_cs_buckets_get_list(&buckets, &p->validated);
193
194	if (p->cs_flags & RADEON_CS_USE_VM)
195		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
196					      &p->validated);
197	if (need_mmap_lock)
198		mmap_read_lock(current->mm);
199
200	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
201
202	if (need_mmap_lock)
203		mmap_read_unlock(current->mm);
204
205	return r;
206}
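
The priority computed in the loop above folds the 4-bit userspace priority and the write flag into a single 0..31 key, leaving 32 (RADEON_CS_MAX_PRIORITY) for relocations the kernel itself pins to VRAM. A small sketch of that mapping; the RADEON_RELOC_PRIO_MASK value is copied here on the assumption that it matches the UAPI header:

#include <stdio.h>
#include <stdint.h>

#define RADEON_RELOC_PRIO_MASK	(0xf << 0)	/* assumed to match drm/radeon_drm.h */

int main(void)
{
	uint32_t flags = 7;	/* hypothetical userspace priority */
	uint32_t write_domain;

	/* read-only: 7 * 2 + 0 = 14; written: 7 * 2 + 1 = 15 */
	for (write_domain = 0; write_domain <= 1; write_domain++)
		printf("prio %u, write %u -> key %u\n",
		       flags & RADEON_RELOC_PRIO_MASK, write_domain,
		       (flags & RADEON_RELOC_PRIO_MASK) * 2 + !!write_domain);
	return 0;
}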
207
208static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
209{
210	p->priority = priority;
211
212	switch (ring) {
213	default:
214		DRM_ERROR("unknown ring id: %d\n", ring);
215		return -EINVAL;
216	case RADEON_CS_RING_GFX:
217		p->ring = RADEON_RING_TYPE_GFX_INDEX;
218		break;
219	case RADEON_CS_RING_COMPUTE:
220		if (p->rdev->family >= CHIP_TAHITI) {
221			if (p->priority > 0)
222				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
223			else
224				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
225		} else
226			p->ring = RADEON_RING_TYPE_GFX_INDEX;
227		break;
228	case RADEON_CS_RING_DMA:
229		if (p->rdev->family >= CHIP_CAYMAN) {
230			if (p->priority > 0)
231				p->ring = R600_RING_TYPE_DMA_INDEX;
232			else
233				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
234		} else if (p->rdev->family >= CHIP_RV770) {
235			p->ring = R600_RING_TYPE_DMA_INDEX;
236		} else {
237			return -EINVAL;
238		}
239		break;
240	case RADEON_CS_RING_UVD:
241		p->ring = R600_RING_TYPE_UVD_INDEX;
242		break;
243	case RADEON_CS_RING_VCE:
244		/* TODO: only use the low priority ring for now */
245		p->ring = TN_RING_TYPE_VCE1_INDEX;
246		break;
247	}
248	return 0;
249}
250
251static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
252{
253	struct radeon_bo_list *reloc;
254	int r;
255
256	list_for_each_entry(reloc, &p->validated, tv.head) {
257		struct dma_resv *resv;
258
259		resv = reloc->robj->tbo.base.resv;
260		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
261				     reloc->tv.num_shared);
262		if (r)
263			return r;
264	}
265	return 0;
266}
267
268/* XXX: note that this is called from the legacy UMS CS ioctl as well */
269int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
270{
271	struct drm_radeon_cs *cs = data;
272	uint64_t *chunk_array_ptr;
273	unsigned size, i;
274	u32 ring = RADEON_CS_RING_GFX;
275	s32 priority = 0;
276
277	INIT_LIST_HEAD(&p->validated);
278
279	if (!cs->num_chunks) {
280		return 0;
281	}
282
283	/* get chunks */
284	p->idx = 0;
285	p->ib.sa_bo = NULL;
286	p->const_ib.sa_bo = NULL;
287	p->chunk_ib = NULL;
288	p->chunk_relocs = NULL;
289	p->chunk_flags = NULL;
290	p->chunk_const_ib = NULL;
291	p->chunks_array = kvmalloc_array(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
292	if (p->chunks_array == NULL) {
293		return -ENOMEM;
294	}
295	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
296	if (copy_from_user(p->chunks_array, chunk_array_ptr,
297			       sizeof(uint64_t)*cs->num_chunks)) {
298		return -EFAULT;
299	}
300	p->cs_flags = 0;
301	p->nchunks = cs->num_chunks;
302	p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
303	if (p->chunks == NULL) {
304		return -ENOMEM;
305	}
306	for (i = 0; i < p->nchunks; i++) {
307		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
308		struct drm_radeon_cs_chunk user_chunk;
309		uint32_t __user *cdata;
310
311		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
312		if (copy_from_user(&user_chunk, chunk_ptr,
313				       sizeof(struct drm_radeon_cs_chunk))) {
314			return -EFAULT;
315		}
316		p->chunks[i].length_dw = user_chunk.length_dw;
317		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
318			p->chunk_relocs = &p->chunks[i];
319		}
320		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
321			p->chunk_ib = &p->chunks[i];
322			/* zero length IB isn't useful */
323			if (p->chunks[i].length_dw == 0)
324				return -EINVAL;
325		}
326		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
327			p->chunk_const_ib = &p->chunks[i];
328			/* zero length CONST IB isn't useful */
329			if (p->chunks[i].length_dw == 0)
330				return -EINVAL;
331		}
332		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
333			p->chunk_flags = &p->chunks[i];
334			/* zero length flags aren't useful */
335			if (p->chunks[i].length_dw == 0)
336				return -EINVAL;
337		}
338
339		size = p->chunks[i].length_dw;
340		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
341		p->chunks[i].user_ptr = cdata;
342		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
343			continue;
344
345		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
346			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
347				continue;
348		}
349
350		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
351		size *= sizeof(uint32_t);
352		if (p->chunks[i].kdata == NULL) {
353			return -ENOMEM;
354		}
355		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
356			return -EFAULT;
357		}
358		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
359			p->cs_flags = p->chunks[i].kdata[0];
360			if (p->chunks[i].length_dw > 1)
361				ring = p->chunks[i].kdata[1];
362			if (p->chunks[i].length_dw > 2)
363				priority = (s32)p->chunks[i].kdata[2];
364		}
365	}
366
367	/* these are KMS only */
368	if (p->rdev) {
369		if ((p->cs_flags & RADEON_CS_USE_VM) &&
370		    !p->rdev->vm_manager.enabled) {
371			DRM_ERROR("VM not active on asic!\n");
372			return -EINVAL;
373		}
374
375		if (radeon_cs_get_ring(p, ring, priority))
376			return -EINVAL;
377
378		/* we only support VM on some SI+ rings */
379		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
380			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
381				DRM_ERROR("Ring %d requires VM!\n", p->ring);
382				return -EINVAL;
383			}
384		} else {
385			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
386				DRM_ERROR("VM not supported on ring %d!\n",
387					  p->ring);
388				return -EINVAL;
389			}
390		}
391	}
392
393	return 0;
394}
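
The RADEON_CHUNK_ID_FLAGS chunk handled above carries up to three dwords: the CS flags, the target ring, and a signed priority. A hypothetical userspace layout, assuming the constants from drm/radeon_drm.h:

uint32_t flags_chunk[3] = {
	RADEON_CS_USE_VM,		/* kdata[0] -> p->cs_flags */
	RADEON_CS_RING_COMPUTE,		/* kdata[1] -> ring */
	0,				/* kdata[2] -> priority */
};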
395
396static int cmp_size_smaller_first(void *priv, const struct list_head *a,
397				  const struct list_head *b)
398{
399	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
400	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
401
402	/* Sort A before B if A is smaller. */
403	if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
404		return 1;
405	if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
406		return -1;
407	return 0;
408}
409
410/**
411 * radeon_cs_parser_fini() - clean parser states
412 * @parser:	parser structure holding parsing context.
413 * @error:	error number
414 * @backoff:	indicator to backoff the reservation
415 *
416 * If error is set then unvalidate buffers, otherwise just free memory
417 * used by parsing context.
418 **/
419static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
420{
421	unsigned i;
422
423	if (!error) {
424		/* Sort the buffer list from the smallest to largest buffer,
425		 * which affects the order of buffers in the LRU list.
426		 * This assures that the smallest buffers are added first
427		 * to the LRU list, so they are likely to be later evicted
428		 * first, instead of large buffers whose eviction is more
429		 * expensive.
430		 *
431		 * This slightly lowers the number of bytes moved by TTM
432		 * per frame under memory pressure.
433		 */
434		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
435
436		ttm_eu_fence_buffer_objects(&parser->ticket,
437					    &parser->validated,
438					    &parser->ib.fence->base);
439	} else if (backoff) {
440		ttm_eu_backoff_reservation(&parser->ticket,
441					   &parser->validated);
442	}
443
444	if (parser->relocs != NULL) {
445		for (i = 0; i < parser->nrelocs; i++) {
446			struct radeon_bo *bo = parser->relocs[i].robj;
447			if (bo == NULL)
448				continue;
449
450			drm_gem_object_put(&bo->tbo.base);
451		}
452	}
453	kfree(parser->track);
454	kvfree(parser->relocs);
455	kvfree(parser->vm_bos);
456	for (i = 0; i < parser->nchunks; i++)
457		kvfree(parser->chunks[i].kdata);
458	kvfree(parser->chunks);
459	kvfree(parser->chunks_array);
460	radeon_ib_free(parser->rdev, &parser->ib);
461	radeon_ib_free(parser->rdev, &parser->const_ib);
462}
463
464static int radeon_cs_ib_chunk(struct radeon_device *rdev,
465			      struct radeon_cs_parser *parser)
466{
467	int r;
468
469	if (parser->chunk_ib == NULL)
470		return 0;
471
472	if (parser->cs_flags & RADEON_CS_USE_VM)
473		return 0;
474
475	r = radeon_cs_parse(rdev, parser->ring, parser);
476	if (r || parser->parser_error) {
477		DRM_ERROR("Invalid command stream !\n");
478		return r;
479	}
480
481	r = radeon_cs_sync_rings(parser);
482	if (r) {
483		if (r != -ERESTARTSYS)
484			DRM_ERROR("Failed to sync rings: %i\n", r);
485		return r;
486	}
487
488	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
489		radeon_uvd_note_usage(rdev);
490	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
491		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
492		radeon_vce_note_usage(rdev);
493
494	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
495	if (r) {
496		DRM_ERROR("Failed to schedule IB !\n");
497	}
498	return r;
499}
500
501static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
502				   struct radeon_vm *vm)
503{
504	struct radeon_device *rdev = p->rdev;
505	struct radeon_bo_va *bo_va;
506	int i, r;
507
508	r = radeon_vm_update_page_directory(rdev, vm);
509	if (r)
510		return r;
511
512	r = radeon_vm_clear_freed(rdev, vm);
513	if (r)
514		return r;
515
516	if (vm->ib_bo_va == NULL) {
517		DRM_ERROR("Tmp BO not in VM!\n");
518		return -EINVAL;
519	}
520
521	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
522				rdev->ring_tmp_bo.bo->tbo.resource);
523	if (r)
524		return r;
525
526	for (i = 0; i < p->nrelocs; i++) {
527		struct radeon_bo *bo;
528
529		bo = p->relocs[i].robj;
530		bo_va = radeon_vm_bo_find(vm, bo);
531		if (bo_va == NULL) {
532			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
533			return -EINVAL;
534		}
535
536		r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
537		if (r)
538			return r;
539
540		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
541
542		r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
543		if (r)
544			return r;
545	}
546
547	return radeon_vm_clear_invalids(rdev, vm);
548}
549
550static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
551				 struct radeon_cs_parser *parser)
552{
553	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
554	struct radeon_vm *vm = &fpriv->vm;
555	int r;
556
557	if (parser->chunk_ib == NULL)
558		return 0;
559	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
560		return 0;
561
562	if (parser->const_ib.length_dw) {
563		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
564		if (r) {
565			return r;
566		}
567	}
568
569	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
570	if (r) {
571		return r;
572	}
573
574	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
575		radeon_uvd_note_usage(rdev);
576
577	mutex_lock(&vm->mutex);
578	r = radeon_bo_vm_update_pte(parser, vm);
579	if (r) {
580		goto out;
581	}
582
583	r = radeon_cs_sync_rings(parser);
584	if (r) {
585		if (r != -ERESTARTSYS)
586			DRM_ERROR("Failed to sync rings: %i\n", r);
587		goto out;
588	}
589
590	if ((rdev->family >= CHIP_TAHITI) &&
591	    (parser->chunk_const_ib != NULL)) {
592		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
593	} else {
594		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
595	}
596
597out:
598	mutex_unlock(&vm->mutex);
599	return r;
600}
601
602static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
603{
604	if (r == -EDEADLK) {
605		r = radeon_gpu_reset(rdev);
606		if (!r)
607			r = -EAGAIN;
608	}
609	return r;
610}
611
612static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
613{
614	struct radeon_cs_chunk *ib_chunk;
615	struct radeon_vm *vm = NULL;
616	int r;
617
618	if (parser->chunk_ib == NULL)
619		return 0;
620
621	if (parser->cs_flags & RADEON_CS_USE_VM) {
622		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
623		vm = &fpriv->vm;
624
625		if ((rdev->family >= CHIP_TAHITI) &&
626		    (parser->chunk_const_ib != NULL)) {
627			ib_chunk = parser->chunk_const_ib;
628			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
629				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
630				return -EINVAL;
631			}
632			r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
633					   vm, ib_chunk->length_dw * 4);
634			if (r) {
635				DRM_ERROR("Failed to get const ib !\n");
636				return r;
637			}
638			parser->const_ib.is_const_ib = true;
639			parser->const_ib.length_dw = ib_chunk->length_dw;
640			if (copy_from_user(parser->const_ib.ptr,
641					       ib_chunk->user_ptr,
642					       ib_chunk->length_dw * 4))
643				return -EFAULT;
644		}
645
646		ib_chunk = parser->chunk_ib;
647		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
648			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
649			return -EINVAL;
650		}
651	}
652	ib_chunk = parser->chunk_ib;
653
654	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
655			   vm, ib_chunk->length_dw * 4);
656	if (r) {
657		DRM_ERROR("Failed to get ib !\n");
658		return r;
659	}
660	parser->ib.length_dw = ib_chunk->length_dw;
661	if (ib_chunk->kdata)
662		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
663	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
664		return -EFAULT;
665	return 0;
666}
667
668int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
669{
670	struct radeon_device *rdev = dev->dev_private;
671	struct radeon_cs_parser parser;
672	int r;
673
674	down_read(&rdev->exclusive_lock);
675	if (!rdev->accel_working) {
676		up_read(&rdev->exclusive_lock);
677		return -EBUSY;
678	}
679	if (rdev->in_reset) {
680		up_read(&rdev->exclusive_lock);
681		r = radeon_gpu_reset(rdev);
682		if (!r)
683			r = -EAGAIN;
684		return r;
685	}
686	/* initialize parser */
687	memset(&parser, 0, sizeof(struct radeon_cs_parser));
688	parser.filp = filp;
689	parser.rdev = rdev;
690	parser.dev = rdev->dev;
691	parser.family = rdev->family;
692	r = radeon_cs_parser_init(&parser, data);
693	if (r) {
694		DRM_ERROR("Failed to initialize parser !\n");
695		radeon_cs_parser_fini(&parser, r, false);
696		up_read(&rdev->exclusive_lock);
697		r = radeon_cs_handle_lockup(rdev, r);
698		return r;
699	}
700
701	r = radeon_cs_ib_fill(rdev, &parser);
702	if (!r) {
703		r = radeon_cs_parser_relocs(&parser);
704		if (r && r != -ERESTARTSYS)
705			DRM_ERROR("Failed to parse relocation %d!\n", r);
706	}
707
708	if (r) {
709		radeon_cs_parser_fini(&parser, r, false);
710		up_read(&rdev->exclusive_lock);
711		r = radeon_cs_handle_lockup(rdev, r);
712		return r;
713	}
714
715	trace_radeon_cs(&parser);
716
717	r = radeon_cs_ib_chunk(rdev, &parser);
718	if (r) {
719		goto out;
720	}
721	r = radeon_cs_ib_vm_chunk(rdev, &parser);
722	if (r) {
723		goto out;
724	}
725out:
726	radeon_cs_parser_fini(&parser, r, true);
727	up_read(&rdev->exclusive_lock);
728	r = radeon_cs_handle_lockup(rdev, r);
729	return r;
730}
731
732/**
733 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
734 * @p:		parser structure holding parsing context.
735 * @pkt:	where to store packet information
736 * @idx:	packet index
737 *
738 * Assume that chunk_ib_index is properly set. Will return -EINVAL
739 * if the packet is bigger than the remaining IB size or if the packet is unknown.
740 **/
741int radeon_cs_packet_parse(struct radeon_cs_parser *p,
742			   struct radeon_cs_packet *pkt,
743			   unsigned idx)
744{
745	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
746	struct radeon_device *rdev = p->rdev;
747	uint32_t header;
748	int ret = 0, i;
749
750	if (idx >= ib_chunk->length_dw) {
751		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
752			  idx, ib_chunk->length_dw);
753		return -EINVAL;
754	}
755	header = radeon_get_ib_value(p, idx);
756	pkt->idx = idx;
757	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
758	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
759	pkt->one_reg_wr = 0;
760	switch (pkt->type) {
761	case RADEON_PACKET_TYPE0:
762		if (rdev->family < CHIP_R600) {
763			pkt->reg = R100_CP_PACKET0_GET_REG(header);
764			pkt->one_reg_wr =
765				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
766		} else
767			pkt->reg = R600_CP_PACKET0_GET_REG(header);
768		break;
769	case RADEON_PACKET_TYPE3:
770		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
771		break;
772	case RADEON_PACKET_TYPE2:
773		pkt->count = -1;
774		break;
775	default:
776		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
777		ret = -EINVAL;
778		goto dump_ib;
779	}
780	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
781		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
782			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
783		ret = -EINVAL;
784		goto dump_ib;
785	}
786	return 0;
787
788dump_ib:
789	for (i = 0; i < ib_chunk->length_dw; i++) {
790		if (i == idx)
791			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
792		else
793			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
794	}
795	return ret;
796}
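
The decode above follows the usual PM4 packet layout: bits 31:30 select the packet type, bits 29:16 carry the dword count (payload length minus one), and type-3 packets keep their opcode in bits 15:8. A standalone sketch; the shift values are assumptions about what the RADEON_CP_PACKET_GET_* macros reduce to:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t header = 0xC0012D00;	/* hypothetical type-3 header */
	unsigned type = (header >> 30) & 0x3;		/* RADEON_CP_PACKET_GET_TYPE */
	unsigned count = (header >> 16) & 0x3FFF;	/* RADEON_CP_PACKET_GET_COUNT */
	unsigned opcode = (header >> 8) & 0xFF;		/* RADEON_CP_PACKET3_GET_OPCODE */

	printf("type %u, count %u (%u payload dwords), opcode 0x%02x\n",
	       type, count, count + 1, opcode);
	return 0;
}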
797
798/**
799 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
800 * @p:		structure holding the parser context.
801 *
802 * Check if the next packet is NOP relocation packet3.
803 **/
804bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
805{
806	struct radeon_cs_packet p3reloc;
807	int r;
808
809	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
810	if (r)
811		return false;
812	if (p3reloc.type != RADEON_PACKET_TYPE3)
813		return false;
814	if (p3reloc.opcode != RADEON_PACKET3_NOP)
815		return false;
816	return true;
817}
818
819/**
820 * radeon_cs_dump_packet() - dump raw packet context
821 * @p:		structure holding the parser context.
822 * @pkt:	structure holding the packet.
823 *
824 * Used mostly for debugging and error reporting.
825 **/
826void radeon_cs_dump_packet(struct radeon_cs_parser *p,
827			   struct radeon_cs_packet *pkt)
828{
829	volatile uint32_t *ib;
830	unsigned i;
831	unsigned idx;
832
833	ib = p->ib.ptr;
834	idx = pkt->idx;
835	for (i = 0; i <= (pkt->count + 1); i++, idx++)
836		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
837}
838
839/**
840 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
841 * @p:			parser structure holding parsing context.
842 * @cs_reloc:		reloc information
843 * @nomm:		no memory management for debugging
844 *
845 * Check if next packet is relocation packet3, do bo validation and compute
846 * GPU offset using the provided start.
847 **/
848int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
849				struct radeon_bo_list **cs_reloc,
850				int nomm)
851{
852	struct radeon_cs_chunk *relocs_chunk;
853	struct radeon_cs_packet p3reloc;
854	unsigned idx;
855	int r;
856
857	if (p->chunk_relocs == NULL) {
858		DRM_ERROR("No relocation chunk !\n");
859		return -EINVAL;
860	}
861	*cs_reloc = NULL;
862	relocs_chunk = p->chunk_relocs;
863	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
864	if (r)
865		return r;
866	p->idx += p3reloc.count + 2;
867	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
868	    p3reloc.opcode != RADEON_PACKET3_NOP) {
869		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
870			  p3reloc.idx);
871		radeon_cs_dump_packet(p, &p3reloc);
872		return -EINVAL;
873	}
874	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
875	if (idx >= relocs_chunk->length_dw) {
876		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
877			  idx, relocs_chunk->length_dw);
878		radeon_cs_dump_packet(p, &p3reloc);
879		return -EINVAL;
880	}
881	/* FIXME: we assume reloc size is 4 dwords */
882	if (nomm) {
883		*cs_reloc = p->relocs;
884		(*cs_reloc)->gpu_offset =
885			(u64)relocs_chunk->kdata[idx + 3] << 32;
886		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
887	} else
888		*cs_reloc = &p->relocs[(idx / 4)];
889	return 0;
890}