/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <nvif/push006c.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/if0020.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_svm.h"

MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

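/* Mark the channel dead and propagate -ENODEV through its fence context
 * so that any sleeping waiters are woken with an error.
 */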
void
nouveau_channel_kill(struct nouveau_channel *chan)
{
	atomic_set(&chan->killed, 1);
	if (chan->fence)
		nouveau_fence_context_kill(chan->fence, -ENODEV);
}

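/* NVIF event handler, called when the host signals that a channel has
 * been killed (e.g. following an unrecoverable fault on the channel).
 */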
static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
	struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
	struct nouveau_cli *cli = chan->cli;

	NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);

	if (unlikely(!atomic_read(&chan->killed)))
		nouveau_channel_kill(chan);

	return NVIF_EVENT_DROP;
}

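/* Drain a channel by emitting a fence and waiting for it to signal.
 * Dead channels are skipped, since no further fences can complete on
 * them.
 */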
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
		struct nouveau_cli *cli = chan->cli;
		struct nouveau_fence *fence = NULL;
		int ret;

		ret = nouveau_fence_new(&fence, chan);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret) {
			NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
				  chan->chid, cli->name);
			return ret;
		}
	}
	return 0;
}

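/* Tear down a channel: destroy its fence context, detach it from SVM,
 * then release the NVIF objects, USERD memory and push buffer in the
 * reverse order of construction.
 */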
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		if (chan->fence)
			nouveau_fence(chan->cli->drm)->context_del(chan);

		if (nvif_object_constructed(&chan->user))
			nouveau_svmm_part(chan->vmm->svmm, chan->inst);

		nvif_object_dtor(&chan->blit);
		nvif_object_dtor(&chan->nvsw);
		nvif_object_dtor(&chan->gart);
		nvif_object_dtor(&chan->vram);
		nvif_event_dtor(&chan->kill);
		nvif_object_dtor(&chan->user);
		nvif_mem_dtor(&chan->mem_userd);
		nvif_object_dtor(&chan->push.ctxdma);
		nouveau_vma_del(&chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->bo.pin_count)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_fini(chan->push.buffer);
		kfree(chan);
	}
	*pchan = NULL;
}

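/* Flow control callbacks for the nvif_push layer.  Both translate the
 * words written between push.bgn and push.cur into the channel's DMA
 * cursor; kick() then fires the ring, while wait() requests 'size'
 * words of space and rebases the push pointers onto the area returned
 * by RING_SPACE().
 */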
static void
nouveau_channel_kick(struct nvif_push *push)
{
	struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
	chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
	FIRE_RING(chan);
	chan->chan.push.bgn = chan->chan.push.cur;
}

static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
	struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
	int ret;
	chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
	ret = RING_SPACE(chan, size);
	if (ret == 0) {
		chan->chan.push.bgn = chan->chan.push.mem.object.map.ptr;
		chan->chan.push.bgn = chan->chan.push.bgn + chan->dma.cur;
		chan->chan.push.cur = chan->chan.push.bgn;
		chan->chan.push.end = chan->chan.push.bgn + size;
	}
	return ret;
}

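/* First-stage channel creation: allocate, pin and map the DMA push
 * buffer, and create the ctxdma object through which pre-Fermi hosts
 * access it.  Fermi and newer reach the push buffer directly via the
 * channel's VMM, so no ctxdma is needed there.
 */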
static int
nouveau_channel_prep(struct nouveau_cli *cli,
		     u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_drm *drm = cli->drm;
	struct nvif_device *device = &cli->device;
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->cli = cli;
	chan->vmm = nouveau_cli_vmm(cli);
	atomic_set(&chan->killed, 0);

	/* allocate memory for dma push buffer */
	target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
	if (nouveau_vram_pushbuf)
		target = NOUVEAU_GEM_DOMAIN_VRAM;

	ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
			    &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target, false);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	chan->chan.push.mem.object.parent = cli->base.object.parent;
	chan->chan.push.mem.object.client = &cli->base;
	chan->chan.push.mem.object.name = "chanPush";
	chan->chan.push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
	chan->chan.push.wait = nouveau_channel_wait;
	chan->chan.push.kick = nouveau_channel_kick;

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.addr = chan->push.buffer->offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
				      &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		chan->push.addr = chan->push.vma->addr;

		if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
			return 0;

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = chan->vmm->vmm.limit - 1;
	} else
	if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = drm->agp.base;
			args.limit = drm->agp.base + drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}
	}

	ret = nvif_object_ctor(&device->object, "abi16PushCtxDma", 0,
			       NV_DMA_FROM_MEMORY, &args, sizeof(args),
			       &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}

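/* Second-stage channel creation: pick the newest channel class exposed
 * by the device, size the push buffer (GPFIFO classes carry an extra
 * 8KiB indirect buffer after the 64KiB push area), allocate USERD from
 * VRAM on Volta and newer, then construct the channel object itself.
 */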
static int
nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
		     struct nouveau_channel **pchan)
{
	const struct nvif_mclass hosts[] = {
		{  AMPERE_CHANNEL_GPFIFO_B, 0 },
		{  AMPERE_CHANNEL_GPFIFO_A, 0 },
		{  TURING_CHANNEL_GPFIFO_A, 0 },
		{   VOLTA_CHANNEL_GPFIFO_A, 0 },
		{  PASCAL_CHANNEL_GPFIFO_A, 0 },
		{ MAXWELL_CHANNEL_GPFIFO_A, 0 },
		{  KEPLER_CHANNEL_GPFIFO_B, 0 },
		{  KEPLER_CHANNEL_GPFIFO_A, 0 },
		{   FERMI_CHANNEL_GPFIFO  , 0 },
		{     G82_CHANNEL_GPFIFO  , 0 },
		{    NV50_CHANNEL_GPFIFO  , 0 },
		{    NV40_CHANNEL_DMA     , 0 },
		{    NV17_CHANNEL_DMA     , 0 },
		{    NV10_CHANNEL_DMA     , 0 },
		{    NV03_CHANNEL_DMA     , 0 },
		{}
	};
	struct {
		struct nvif_chan_v0 chan;
		char name[TASK_COMM_LEN+16];
	} args;
	struct nvif_device *device = &cli->device;
	struct nouveau_channel *chan;
	const u64 plength = 0x10000;
	const u64 ioffset = plength;
	const u64 ilength = 0x02000;
	char name[TASK_COMM_LEN];
	int cid, ret;
	u64 size;

	cid = nvif_mclass(&device->object, hosts);
	if (cid < 0)
		return cid;

	if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO)
		size = plength;
	else
		size = ioffset + ilength;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(cli, size, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.chan.version = 0;
	args.chan.namelen = sizeof(args.name);
	args.chan.runlist = __ffs64(runm);
	args.chan.runq = 0;
	args.chan.priv = priv;
	args.chan.devm = BIT(0);
	if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
		args.chan.vmm = 0;
		args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
		args.chan.offset = chan->push.addr;
		args.chan.length = 0;
	} else {
		args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
		if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
			args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
		else
			args.chan.ctxdma = 0;
		args.chan.offset = ioffset + chan->push.addr;
		args.chan.length = ilength;
	}
	args.chan.huserd = 0;
	args.chan.ouserd = 0;

	/* allocate userd */
	if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
		ret = nvif_mem_ctor(&cli->mmu, "abi16ChanUSERD", NVIF_CLASS_MEM_GF100,
				    NVIF_MEM_VRAM | NVIF_MEM_COHERENT | NVIF_MEM_MAPPABLE,
				    0, PAGE_SIZE, NULL, 0, &chan->mem_userd);
		if (ret)
			return ret;

		args.chan.huserd = nvif_handle(&chan->mem_userd.object);
		args.chan.ouserd = 0;

		chan->userd = &chan->mem_userd.object;
	} else {
		chan->userd = &chan->user;
	}

	get_task_comm(name, current);
	snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));

	ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
			       &args, sizeof(args), &chan->user);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	chan->runlist = args.chan.runlist;
	chan->chid = args.chan.chid;
	chan->inst = args.chan.inst;
	chan->token = args.chan.token;
	return 0;
}

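/* Final-stage channel initialisation: map USERD, subscribe to the
 * channel-killed event on Fermi+, create the vram/gart ctxdmas needed
 * by pre-Fermi hardware, prime the DMA tracking state (including the
 * NOUVEAU_DMA_SKIPS slots), bind the software object used for fences
 * on pre-Celsius chips, and create the per-channel fence context.
 */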
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nouveau_cli *cli = chan->cli;
	struct nouveau_drm *drm = cli->drm;
	struct nvif_device *device = &cli->device;
	struct nv_dma_v0 args = {};
	int ret, i;

	ret = nvif_object_map(chan->userd, NULL, 0);
	if (ret)
		return ret;

	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
		struct {
			struct nvif_event_v0 base;
			struct nvif_chan_event_v0 host;
		} args;

		args.host.version = 0;
		args.host.type = NVIF_CHAN_EVENT_V0_KILLED;

		ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
				      nouveau_channel_killed, false,
				      &args.base, sizeof(args), &chan->kill);
		if (ret == 0)
			ret = nvif_event_allow(&chan->kill);
		if (ret) {
			NV_ERROR(drm, "Failed to request channel kill "
				      "notification: %d\n", ret);
			return ret;
		}
	}

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
				       NV_DMA_IN_MEMORY, &args, sizeof(args),
				       &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else
		if (drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = drm->agp.base;
			args.limit = drm->agp.base + drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}

		ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
				       NV_DMA_IN_MEMORY, &args, sizeof(args),
				       &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->user.oclass) {
	case NV03_CHANNEL_DMA:
	case NV10_CHANNEL_DMA:
	case NV17_CHANNEL_DMA:
	case NV40_CHANNEL_DMA:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = NV50_DMA_IB_MAX;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = PUSH_WAIT(&chan->chan.push, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		PUSH_DATA(&chan->chan.push, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
				       NVIF_CLASS_SW_NV04,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		ret = PUSH_WAIT(&chan->chan.push, 2);
		if (ret)
			return ret;

		PUSH_NVSQ(&chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
		PUSH_KICK(&chan->chan.push);
	}

	/* initialise synchronisation */
	return nouveau_fence(drm)->context_new(chan);
}

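/* Create and fully initialise a channel, then join it to SVM.  Any
 * failure tears down the partially-constructed channel before
 * returning.
 */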
int
nouveau_channel_new(struct nouveau_cli *cli,
		    bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
{
	int ret;

	ret = nouveau_channel_ctor(cli, priv, runm, pchan);
	if (ret) {
		NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
		return ret;
	}

	ret = nouveau_channel_init(*pchan, vram, gart);
	if (ret) {
		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
		return ret;
	}

	ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
	if (ret)
		nouveau_channel_del(pchan);

	return ret;
}

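/* Release the per-runlist state allocated by nouveau_channels_init(). */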
void
nouveau_channels_fini(struct nouveau_drm *drm)
{
	kfree(drm->runl);
}

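/* Query the host for channel and runlist counts.  When channel IDs are
 * allocated per-runlist (chan_nr == 0), each runlist is queried
 * individually and given its own fence context range; otherwise a
 * single context range is shared by all runlists.
 */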
int
nouveau_channels_init(struct nouveau_drm *drm)
{
	struct {
		struct nv_device_info_v1 m;
		struct {
			struct nv_device_info_v1_data channels;
			struct nv_device_info_v1_data runlists;
		} v;
	} args = {
		.m.version = 1,
		.m.count = sizeof(args.v) / sizeof(args.v.channels),
		.v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
		.v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
	};
	struct nvif_object *device = &drm->client.device.object;
	int ret, i;

	ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
	if (ret ||
	    args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
	    args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
		return -ENODEV;

	drm->chan_nr = drm->chan_total = args.v.channels.data;
	drm->runl_nr = fls64(args.v.runlists.data);
	drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
	if (!drm->runl)
		return -ENOMEM;

	if (drm->chan_nr == 0) {
		for (i = 0; i < drm->runl_nr; i++) {
			if (!(args.v.runlists.data & BIT(i)))
				continue;

			args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
			args.v.channels.data = i;

			ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
			if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
				return -ENODEV;

			drm->runl[i].chan_nr = args.v.channels.data;
			drm->runl[i].chan_id_base = drm->chan_total;
			drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);

			drm->chan_total += drm->runl[i].chan_nr;
		}
	} else {
		drm->runl[0].context_base = dma_fence_context_alloc(drm->chan_nr);
		for (i = 1; i < drm->runl_nr; i++)
			drm->runl[i].context_base = drm->runl[0].context_base;
	}

	return 0;
}