/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

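/* Allocate, pin and map a channel's kernel push buffer, and create the DMA
 * object the FIFO uses to fetch commands from it.
 */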
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	/* allocate buffer object */
	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
	if (ret)
		goto out;

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		goto out;

	/* create DMA object covering the entire memtype where the push
	 * buffer resides, userspace can submit its own push buffers from
	 * anywhere within the same memtype.
	 */
	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
					 &chan->pushbuf_vma);
		if (ret)
			goto out;

		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &chan->pushbuf);
		}
		chan->pushbuf_base = chan->pushbuf_vma.offset;
	} else
	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART,
					     &chan->pushbuf);
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM,
					     &chan->pushbuf);
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI,
					     &chan->pushbuf);
	}

out:
	if (ret) {
		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
		if (chan->pushbuf_bo) {
			nouveau_bo_unmap(chan->pushbuf_bo);
			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
		}
	}

	/* propagate any failure; returning 0 here would let channel setup
	 * continue with a NULL push buffer.
	 */
	return ret;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}
	*chan_ret = chan;
	return 0;
}

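/* Take a user reference on a channel without holding its mutex.  Returns
 * NULL if the channel's user count has already dropped to zero, i.e. the
 * channel is being torn down.
 */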
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}

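/* Look up a channel by id among those owned by file_priv, take a user
 * reference and return the channel with its mutex held.
 */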
struct nouveau_channel *
nouveau_channel_get(struct drm_file *file_priv, int id)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;

	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list) {
		if (chan->id == id) {
			chan = nouveau_channel_get_unlocked(chan);
			spin_unlock(&fpriv->lock);
			mutex_lock(&chan->mutex);
			return chan;
		}
	}
	spin_unlock(&fpriv->lock);

	return ERR_PTR(-EINVAL);
}

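/* Drop a user reference taken without the channel mutex held.  The last
 * user to go triggers the full teardown: idle the channel, boot it off the
 * hardware and release everything it owns.
 */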
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	nouveau_channel_ref(NULL, pchan);
}

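/* Drop a user reference on a channel whose mutex is held, pairing with
 * nouveau_channel_get().
 */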
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}

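/* kref release callback, invoked once the last structure reference taken
 * through nouveau_channel_ref() is dropped.
 */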
static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}

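/* Point *pchan at chan, fixing up structure reference counts: a ref is
 * taken on the new channel (if any) before the ref held through *pchan is
 * dropped.  Passing chan == NULL simply releases *pchan.
 */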
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}

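/* Wait for a channel to go idle: if fences are still outstanding, emit one
 * final fence and wait for it to signal.  Failure is logged but otherwise
 * ignored.
 */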
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	nouveau_fence_update(chan);

	if (chan->fence.sequence != chan->fence.sequence_ack) {
		ret = nouveau_fence_new(chan, &fence, true);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		list_del(&chan->list);
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel  = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle = NvM2MF;
		if (dev_priv->card_type < NV_50)
			init->subchan[0].grclass = 0x0039;
		else
			init->subchan[0].grclass = 0x5039;
		init->subchan[1].handle = NvSw;
		init->subchan[1].grclass = NV_SW;
		init->nr_subchan = 2;
	} else {
		init->subchan[0].handle  = 0x9039;
		init->subchan[0].grclass = 0x9039;
		init->nr_subchan = 1;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	list_del(&chan->list);
	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
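
/* A minimal userspace sketch (not part of this file) of exercising the
 * NOUVEAU_CHANNEL_ALLOC/NOUVEAU_CHANNEL_FREE ioctls above through libdrm's
 * drmCommandWriteRead()/drmCommandWrite().  The ctxdma handle values are
 * hypothetical placeholders; a real client picks its own handles, and only
 * ~0 is rejected by nouveau_ioctl_fifo_alloc().
 */
#include <stdio.h>
#include <xf86drm.h>			/* drmCommandWriteRead() */
#include "nouveau_drm.h"		/* struct drm_nouveau_channel_alloc */

static int example_alloc_channel(int fd)
{
	struct drm_nouveau_channel_alloc req = {
		.fb_ctxdma_handle = 0xbeef0201,	/* placeholder handles */
		.tt_ctxdma_handle = 0xbeef0202,
	};
	struct drm_nouveau_channel_free free_req;
	int ret;

	/* round-trips through nouveau_ioctl_fifo_alloc(); on return the
	 * kernel has filled in channel id, pushbuf domains, subchannel
	 * assignments and the notifier GEM handle.
	 */
	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	printf("channel %d, pushbuf domains 0x%08x, %d subchannel(s)\n",
	       req.channel, req.pushbuf_domains, req.nr_subchan);

	/* drop the userspace reference taken by nouveau_ioctl_fifo_alloc() */
	free_req.channel = req.channel;
	return drmCommandWrite(fd, DRM_NOUVEAU_CHANNEL_FREE,
			       &free_req, sizeof(free_req));
}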