/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/list_sort.h>
#include <linux/pci.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
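
/* Ordering note: list_splice() inserts each bucket at the head of out_list,
 * so iterating the buckets in ascending order leaves the highest-priority
 * bucket first, which gives the descending concatenation described above.
 * Illustrative example (hypothetical items): adding A (prio 1), B (prio 3),
 * C (prio 1) yields B, A, C; A and C keep their relative order because the
 * sort is stable.
 */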

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = kvcalloc(p->nrelocs, sizeof(struct radeon_bo_list),
			GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of a UVD job is the msg and that must be in
		 * VRAM, the second reloc is the DPB and for WMV that must be in
		 * VRAM as well. Also put everything into VRAM on AGP cards and older
		 * IGP chips to avoid image corruption.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].preferred_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].preferred_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].preferred_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].preferred_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		/* Objects shared as dma-bufs cannot be moved to VRAM */
		if (p->relocs[i].robj->prime_shared_count) {
			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
			if (!p->relocs[i].allowed_domains) {
				DRM_ERROR("BO associated with dma-buf cannot "
					  "be moved to VRAM\n");
				return -EINVAL;
			}
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.num_shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		mmap_read_lock(current->mm);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		mmap_read_unlock(current->mm);

	return r;
}
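
/* Worked example of the priority encoding above (hypothetical reloc): a BO
 * submitted with userspace priority 5 lands in bucket 5 * 2 + 1 = 11 when
 * it is written, or bucket 10 when it is read-only. Note also that a pure
 * VRAM preference is widened to VRAM|GTT in allowed_domains, so validation
 * can fall back to GTT under VRAM pressure instead of failing the CS.
 */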

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}
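
/* Note that for compute on SI (CHIP_TAHITI) and later, the user-supplied
 * priority only selects which compute ring is used (CP1 for positive
 * values, CP2 otherwise); it is not a scheduling priority within a ring.
 * Older families fall back to the GFX ring for compute submissions.
 */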

static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct dma_resv *resv;

		resv = reloc->robj->tbo.base.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.num_shared);
		if (r)
			return r;
	}
	return 0;
}
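
/* This walks every validated BO and, via its dma_resv object, records the
 * fences this submission must wait for before it may run. Whether a reloc
 * counts as shared (read-only) or exclusive (written) was stored in
 * tv.num_shared by radeon_cs_parser_relocs().
 */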

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	u64 size;
	unsigned i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kvmalloc_array(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
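
/* Illustrative layout of a submission as parsed above (hypothetical
 * values): cs->chunks points to an array of user pointers, each to a
 * struct drm_radeon_cs_chunk { chunk_id, length_dw, chunk_data }, e.g.
 *
 *   chunk[0] = { RADEON_CHUNK_ID_IB,     256, <256 IB dwords> }
 *   chunk[1] = { RADEON_CHUNK_ID_RELOCS,  12, <3 relocs>      }
 *   chunk[2] = { RADEON_CHUNK_ID_FLAGS,    3, <flags dwords>  }
 *
 * where the three flags dwords are cs_flags, ring id and priority. IB
 * chunk data is only copied to kdata here on AGP systems; otherwise it is
 * copied straight into the indirect buffer by radeon_cs_ib_fill().
 */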

static int cmp_size_smaller_first(void *priv, const struct list_head *a,
				  const struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
		return 1;
	if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
		return -1;
	return 0;
}
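
/* list_sort() uses the usual comparator contract (negative/zero/positive
 * for a < b, a == b, a > b) and is a stable merge sort, so equally sized
 * buffers keep their validation order.
 */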

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, back off the buffer reservations; otherwise fence the
 * validated buffers. In both cases the memory used by the parsing context
 * is freed.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_put(&bo->tbo.base);
		}
	}
	kfree(parser->track);
	kvfree(parser->relocs);
	kvfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	kvfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
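
/* This is the non-VM path: without a per-process GPU address space the
 * kernel must fully check the command stream, so radeon_cs_parse() walks
 * every packet and patches relocations to real GPU offsets before the IB
 * is scheduled.
 */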

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				rdev->ring_tmp_bo.bo->tbo.resource);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);

		r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}
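
/* Update ordering matters above: the page directory is made valid before
 * the page tables, freed mappings are cleared before new ones are written,
 * and each bo_va->last_pt_update fence is added to the IB's sync object so
 * the CS cannot start running before its mappings are actually in place.
 */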

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
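
/* -EDEADLK appears to be used here as radeon's "GPU lockup detected while
 * waiting" signal (e.g. from fence waits during validation). A successful
 * reset is converted to -EAGAIN so that userspace just resubmits the same
 * CS.
 */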

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					   vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
			   vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}
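
/* The RADEON_IB_VM_MAX_SIZE bound is only enforced for VM submissions;
 * non-VM streams are checked packet by packet later instead. The kdata
 * branch at the end covers IB chunks already copied to kernel memory by
 * radeon_cs_parser_init() (AGP systems); everyone else copies the user IB
 * directly into the indirect buffer.
 */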

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
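
/* Overall ioctl flow: take exclusive_lock shared (so a GPU reset cannot
 * run concurrently), parse the chunks, set up the IB(s), look up and
 * validate all relocated BOs, then hand off to exactly one of
 * radeon_cs_ib_chunk() (non-VM) or radeon_cs_ib_vm_chunk() (VM); each
 * returns 0 immediately when the other mode is in use.
 * radeon_cs_parser_fini() runs on every path and either fences the
 * validated BOs or backs off the reservation.
 */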

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	packet index
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining IB size or if the packet type is
 * unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}
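
/* Header layout as extracted above: bits 31:30 hold the packet type and
 * bits 29:16 the body size minus one. Worked bounds example (hypothetical
 * IB): a PACKET3 header at idx 10 with count 2 occupies dwords 10..13
 * (the header plus count + 1 body dwords), so it only fits if
 * length_dw >= 14, which is exactly what the
 * "pkt->count + 1 + pkt->idx >= length_dw" rejection enforces.
 */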

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:			parser structure holding parsing context.
 * @cs_reloc:		reloc information
 * @nomm:		no memory management for debugging
 *
 * Check if next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}