v3.1 (drivers/gpu/drm/radeon/radeon_cs.c)
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		}
	}
	return radeon_bo_list_validate(&p->validated);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
		} else {
			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
				kfree(p->chunks[i].kpage[0]);
				kfree(p->chunks[i].kpage[1]);
				return -ENOMEM;
			}
			p->chunks[i].kpage_idx[0] = -1;
			p->chunks[i].kpage_idx[1] = -1;
			p->chunks[i].last_copied_page = -1;
			p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
		}
	}
	if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
		DRM_ERROR("cs IB too big: %d\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}
	return 0;
}
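
radeon_cs_parser_init() consumes cs->chunks as a user pointer to an array of pointers, each referencing a struct drm_radeon_cs_chunk. Below is a minimal userspace sketch of a two-chunk submission, assuming an open DRM fd and caller-built "ib"/"relocs" dword buffers; submit_cs and its parameters are hypothetical (it needs drm/radeon_drm.h and libdrm's xf86drm.h):

static int submit_cs(int fd, uint32_t *ib, unsigned ib_len_dw,
		     uint32_t *relocs, unsigned nrelocs)
{
	struct drm_radeon_cs_chunk chunks[2];
	uint64_t chunk_ptrs[2];
	struct drm_radeon_cs cs = { 0 };

	chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
	chunks[0].length_dw = ib_len_dw;
	chunks[0].chunk_data = (uintptr_t)ib;

	chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
	chunks[1].length_dw = nrelocs * 4;	/* 4 dwords per entry */
	chunks[1].chunk_data = (uintptr_t)relocs;

	/* cs->chunks is a pointer to an array of pointers */
	chunk_ptrs[0] = (uintptr_t)&chunks[0];
	chunk_ptrs[1] = (uintptr_t)&chunks[1];

	cs.num_chunks = 2;
	cs.chunks = (uintptr_t)chunk_ptrs;

	return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
}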
/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error && parser->ib)
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib->fence);
	else
		ttm_eu_backoff_reservation(&parser->validated);

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	int r;

	mutex_lock(&rdev->cs_mutex);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_get(rdev, &parser.ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(&parser);
	if (r || parser.parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_schedule(rdev, parser.ib);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	radeon_cs_parser_fini(&parser, r);
	mutex_unlock(&rdev->cs_mutex);
	return r;
}

int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}
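
The tail of an IB rarely ends on a page boundary, hence the modulo logic above. As a worked example with PAGE_SIZE 4096: an IB of 1500 dwords is 6000 bytes, so last_page_index = (6000 - 1) / 4096 = 1 and the final copy is 6000 % 4096 = 1904 bytes; an IB of exactly 2048 dwords (8192 bytes) yields modulo 0, so the full PAGE_SIZE is copied instead. The same computation in isolation, as a sketch:

static inline int ib_tail_bytes(unsigned length_dw)
{
	int size = (length_dw * 4) % 4096;	/* PAGE_SIZE assumed 4096 */

	return size ? size : 4096;	/* 1500 dw -> 1904, 2048 dw -> 4096 */
}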

int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB here */
	memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}
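
radeon_cs_update_pages() backs a small two-slot page cache: kpage_idx[] records which IB page each slot currently holds, and the slot with the smaller index (the older one) is recycled on a miss. A hedged sketch of how a dword fetch would use it, modeled on the radeon_get_ib_value() helper of this era (details may differ from the real helper):

static u32 example_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int pg_idx = (idx * 4) / PAGE_SIZE;
	int pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)	/* hit in slot 0 */
		return ibc->kpage[0][pg_offset / 4];
	if (ibc->kpage_idx[1] == pg_idx)	/* hit in slot 1 */
		return ibc->kpage[1][pg_offset / 4];
	/* miss: fault the page into whichever slot is older */
	return ibc->kpage[radeon_cs_update_pages(p, pg_idx)][pg_offset / 4];
}
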
v5.4 (drivers/gpu/drm/radeon/radeon_cs.c)
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/list_sort.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
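
A standalone userspace illustration of the same stable-bucket idea (not kernel code): items are appended to their priority's bucket, then the buckets are emitted from highest to lowest, preserving insertion order within a priority:

#include <stdio.h>

int main(void)
{
	const struct { const char *name; unsigned prio; } in[] = {
		{ "vertex", 1 }, { "texture0", 3 },
		{ "texture1", 3 }, { "scratch", 0 },
	};
	const char *bucket[4][4] = { { 0 } };
	unsigned n[4] = { 0 }, i, p;

	for (i = 0; i < 4; i++)	/* list_add_tail() analogue keeps it stable */
		bucket[in[i].prio][n[in[i].prio]++] = in[i].name;
	for (p = 4; p-- > 0;)	/* highest bucket first, like the splice loop */
		for (i = 0; i < n[p]; i++)
			printf("%s\n", bucket[p][i]);
	return 0;
}

This prints texture0, texture1, vertex, scratch: the two priority-3 buffers stay in submission order.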

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
			GFP_KERNEL | __GFP_ZERO);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of an UVD job is the msg and that must be in
		 * VRAM, the second reloc is the DPB and for WMV that must be in
		 * VRAM as well. Also put everything into VRAM on AGP cards and
		 * older IGP chips to avoid image corruption.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
						   PCI_CAP_ID_AGP) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].preferred_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].preferred_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].preferred_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].preferred_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		/* Objects shared as dma-bufs cannot be moved to VRAM */
		if (p->relocs[i].robj->prime_shared_count) {
			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
			if (!p->relocs[i].allowed_domains) {
				DRM_ERROR("BO associated with dma-buf cannot "
					  "be moved to VRAM\n");
				return -EINVAL;
			}
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.num_shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}
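
A worked example of the priority formula above: a reloc submitted with userspace priority 15 (RADEON_RELOC_PRIO_MASK) and a non-zero write domain gets 15 * 2 + 1 = 31, the top of the userspace range, while a read-only reloc at priority 0 gets 0; RADEON_CS_MAX_PRIORITY (32) remains reserved for the kernel's own UVD special case.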

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct dma_resv *resv;

		resv = reloc->robj->tbo.base.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.num_shared);
		if (r)
			return r;
	}
	return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
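
Per the parsing loop above, the FLAGS chunk is up to three dwords: kdata[0] holds the cs_flags, kdata[1] the ring, and kdata[2] the priority. A hypothetical userspace FLAGS chunk requesting the compute ring with VM enabled might look like:

uint32_t flags_chunk[3];

flags_chunk[0] = RADEON_CS_USE_VM;		/* cs_flags */
flags_chunk[1] = RADEON_CS_RING_COMPUTE;	/* ring */
flags_chunk[2] = 0;				/* priority */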

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_put_unlocked(&bo->tbo.base);
		}
	}
	kfree(parser->track);
	kvfree(parser->relocs);
	kvfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
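
A lockup (-EDEADLK) is thus converted into a GPU reset followed by -EAGAIN, which tells the submitter to simply try again. A sketch of the caller-side retry loop, assuming the libdrm wrapper returns the negated errno and reusing the hypothetical cs setup from the v3.1 sketch above:

int r;

do {
	r = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
} while (r == -EAGAIN);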

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet in the IB chunk
 *
 * Assume that the IB chunk is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining IB size or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}
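
The GET_TYPE/GET_COUNT/GET_OPCODE accessors above decode the PM4 header. The layout assumed here (packet type in bits 31:30, count in bits 29:16, opcode in bits 15:8 for type-3 packets) follows the radeon register headers; a sketch mirroring the driver's PACKET3() macro:

/* assumed bit layout; mirrors the PACKET3() macro in the radeon headers */
#define EXAMPLE_PACKET3(op, n) \
	((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))

uint32_t header = EXAMPLE_PACKET3(0x10 /* RADEON_PACKET3_NOP */, 0);
/* type   = header >> 30          = 3
 * count  = (header >> 16) & 0x3fff = 0 (i.e. one payload dword)
 * opcode = (header >> 8) & 0xff  = 0x10
 */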

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a relocation packet3 NOP.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting reloc information
 * @nomm:	no memory management for debugging
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}
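
As the FIXME notes, relocation entries are four dwords each, and the payload dword of the NOP packet is an index into the relocation chunk's dword array, which is why it is divided by 4 above. A hypothetical userspace emitter of such a reloc marker, reusing EXAMPLE_PACKET3 from the earlier sketch:

static void emit_reloc_nop(uint32_t *ib, unsigned *cdw, unsigned reloc_i)
{
	ib[(*cdw)++] = EXAMPLE_PACKET3(0x10 /* NOP */, 0);	/* count 0 = 1 payload dword */
	ib[(*cdw)++] = reloc_i * 4;	/* dword offset of entry in RELOCS chunk */
}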