v3.1
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

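/*
 * radeon_cs_parser_relocs() - resolve the relocation chunk
 *
 * Looks up the GEM object behind every relocation handle, wraps it in a
 * radeon_cs_reloc entry and queues the backing BO on the parser's
 * "validated" list, which is then validated in one go. Duplicate handles
 * reuse the entry created for the first occurrence.
 */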
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < p->nrelocs; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		}
	}
	return radeon_bo_list_validate(&p->validated);
}

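/*
 * radeon_cs_parser_init() - set up a parser from a CS ioctl request
 *
 * Copies the user-space chunk table, records the index of the IB and
 * relocation chunks, and stages the chunk payloads: relocation data is
 * copied into kernel memory up front, while the IB chunk is paged in on
 * demand through a two-page kernel bounce buffer.
 */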
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
		} else {
			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
				kfree(p->chunks[i].kpage[0]);
				kfree(p->chunks[i].kpage[1]);
				return -ENOMEM;
			}
			p->chunks[i].kpage_idx[0] = -1;
			p->chunks[i].kpage_idx[1] = -1;
			p->chunks[i].last_copied_page = -1;
			p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
		}
	}
	if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
		DRM_ERROR("cs IB too big: %d\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, back off the reservation on the validated buffers;
 * otherwise fence them. In both cases free the memory used by the
 * parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error && parser->ib)
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib->fence);
	else
		ttm_eu_backoff_reservation(&parser->validated);

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
}

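/*
 * radeon_cs_ioctl() - entry point for the CS ioctl
 *
 * Serialized by cs_mutex: builds the parser, acquires an IB, resolves
 * relocations, lets the checker parse the command stream and finally
 * schedules the IB on the GPU.
 */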
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	int r;

	mutex_lock(&rdev->cs_mutex);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_get(rdev, &parser.ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(&parser);
	if (r || parser.parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_schedule(rdev, parser.ib);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	radeon_cs_parser_fini(&parser, r);
	mutex_unlock(&rdev->cs_mutex);
	return r;
}

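/*
 * radeon_cs_finish_pages() - copy the tail of the IB chunk
 *
 * Copies every user page that the on-demand paging in
 * radeon_cs_update_pages() has not fetched yet straight into the IB,
 * taking care of the short final page.
 */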
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

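/*
 * radeon_cs_update_pages() - page in one IB page on demand
 *
 * Copies all user pages up to @pg_idx into the IB, bounces the
 * requested page through one of the two kernel pages (evicting the
 * older one) and memcpys it into the IB. Returns the kpage slot that
 * now holds @pg_idx; copy faults are reported via parser_error.
 */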
int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB here */
	memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}
v3.5.6
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

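/*
 * radeon_cs_parser_relocs() - resolve the relocation chunk
 *
 * Looks up the GEM object behind every relocation handle, queues the
 * backing BO on the parser's "validated" list and validates the whole
 * list. A handle that was already seen reuses the earlier entry; its
 * own slot keeps handle 0 so later duplicates still match the fully
 * initialized entry.
 */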
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);

		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}

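/*
 * radeon_cs_get_ring() - map the userspace ring id onto a HW ring
 *
 * GFX submissions always go to the GFX ring. Compute submissions go to
 * one of the two compute rings on SI and later (picked by priority) and
 * fall back to the GFX ring on older parts.
 */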
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	}
	return 0;
}

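/*
 * radeon_cs_sync_rings() - wait for work on other rings
 *
 * Scans the fences attached to the relocated BOs; if any unsignaled
 * fence lives on a different ring, a semaphore is created so the target
 * ring waits for those rings before executing this submission.
 */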
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	bool sync_to_ring[RADEON_NUM_RINGS] = { };
	bool need_sync = false;
	int i, r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_fence *fence;

		if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
			continue;

		fence = p->relocs[i].robj->tbo.sync_obj;
		if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
			sync_to_ring[fence->ring] = true;
			need_sync = true;
		}
	}

	if (!need_sync) {
		return 0;
	}

	r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
	if (r) {
		return r;
	}

	return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
					   sync_to_ring, p->ring);
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if ((p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}

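/*
 * radeon_bo_vm_fence_va() - fence the per-VM mappings of validated BOs
 *
 * For VM submissions, attaches @fence (the IB fence) to every BO
 * virtual address on the validated list so the mappings are not torn
 * down while the GPU may still be using them.
 */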
static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
				  struct radeon_fence *fence)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_list *lobj;

	if (parser->chunk_ib_idx == -1)
		return;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return;

	list_for_each_entry(lobj, &parser->validated, tv.head) {
		struct radeon_bo_va *bo_va;
		struct radeon_bo *rbo = lobj->bo;

		bo_va = radeon_bo_va(rbo, vm);
		radeon_fence_unref(&bo_va->fence);
		bo_va->fence = radeon_fence_ref(fence);
	}
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, back off the reservation on the validated buffers;
 * otherwise fence them. In both cases free the memory used by the
 * parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		/* fence all BO VAs before ttm_eu_fence_buffer_objects() so the BOs are still reserved */
		radeon_bo_vm_fence_va(parser, parser->ib.fence);
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

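/*
 * radeon_cs_ib_chunk() - process the IB chunk for non-VM submissions
 *
 * Copies the command stream into a freshly acquired IB, runs the
 * checker over it, syncs against other rings and schedules the IB.
 * Skipped entirely when the submission uses a VM.
 */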
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_sync_rings(parser);
	if (r) {
		DRM_ERROR("Failed to synchronize rings !\n");
	}
	parser->ib.vm_id = 0;
	r = radeon_ib_schedule(rdev, &parser->ib);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

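/*
 * radeon_bo_vm_update_pte() - refresh page table entries
 *
 * Walks the validated list and updates the VM page table entry of each
 * BO so it points at the BO's current placement.
 */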
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}

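/*
 * radeon_cs_ib_vm_chunk() - process the IB chunk(s) for VM submissions
 *
 * Copies the optional CONST IB (SI and later) and the main IB into
 * kernel IBs, runs the ring-level checker, binds the VM, updates page
 * tables, syncs rings and schedules both IBs under the VM mutex.
 */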
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	mutex_lock(&vm->mutex);
	r = radeon_vm_bind(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	r = radeon_cs_sync_rings(parser);
	if (r) {
		DRM_ERROR("Failed to synchronize rings !\n");
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		parser->const_ib.vm_id = vm->id;
		/* the IB pool is bound at offset 0 in the virtual address
		 * space, so gpu_addr is the offset inside the pool BO
		 */
		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
		r = radeon_ib_schedule(rdev, &parser->const_ib);
		if (r)
			goto out;
	}

	parser->ib.vm_id = vm->id;
	/* the IB pool is bound at offset 0 in the virtual address
	 * space, so gpu_addr is the offset inside the pool BO
	 */
	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
	parser->ib.is_const_ib = false;
	r = radeon_ib_schedule(rdev, &parser->ib);
out:
	if (!r) {
		if (vm->fence) {
			radeon_fence_unref(&vm->fence);
		}
		vm->fence = radeon_fence_ref(parser->ib.fence);
	}
	mutex_unlock(&fpriv->vm.mutex);
	return r;
}

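/*
 * radeon_cs_handle_lockup() - translate a GPU lockup into a reset
 *
 * -EDEADLK from the submission path signals a lockup: reset the GPU and
 * report -EAGAIN so userspace resubmits the command stream.
 */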
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

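/*
 * radeon_cs_ioctl() - entry point for the CS ioctl
 *
 * Serialized by cs_mutex: builds the parser, resolves relocations and
 * dispatches to the non-VM and VM IB paths, handling GPU lockups on the
 * way out.
 */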
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	radeon_mutex_lock(&rdev->cs_mutex);
	if (!rdev->accel_working) {
		radeon_mutex_unlock(&rdev->cs_mutex);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		r = radeon_cs_handle_lockup(rdev, r);
		radeon_mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		r = radeon_cs_handle_lockup(rdev, r);
		radeon_mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	r = radeon_cs_handle_lockup(rdev, r);
	radeon_mutex_unlock(&rdev->cs_mutex);
	return r;
}

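/*
 * radeon_cs_finish_pages() - copy the tail of the IB chunk
 *
 * Copies every user page that the on-demand paging has not fetched yet
 * into the IB, taking care of the short final page.
 */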
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

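/*
 * radeon_cs_update_pages() - page in one IB page on demand
 *
 * On non-AGP boards the user page is copied directly into the IB and
 * kpage[] simply points into it; on AGP it is bounced through one of
 * the two kernel pages and then copied into the (uncached) IB.
 */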
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}

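/*
 * radeon_get_ib_value() - fetch one dword of the IB chunk
 *
 * Returns the dword at @idx, serving it from one of the two cached
 * kernel pages when possible and paging the containing user page in via
 * radeon_cs_update_pages() otherwise.
 */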
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}