/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_ctor(&cli->base.object, "abi16Device",
					     0, NV_DEVICE, &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}
	}
	return cli->abi16;
}

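/* Look up (or lazily create) the per-client ABI16 state, returning with the
 * client mutex held on success.  Callers release it via nouveau_abi16_put().
 */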
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

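/* Release the client mutex taken by nouveau_abi16_get(), passing @ret through
 * for convenience.
 */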
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	mutex_unlock(&cli->mutex);
	return ret;
}

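/* Return the software (NVSW) object class matching the device's chipset
 * family.
 */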
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

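/* Destroy an object allocated through ABI16 and release its notifier heap
 * node, if any.
 */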
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* Cancel all jobs from the entity's queue. */
	if (chan->sched)
		drm_sched_entity_fini(&chan->sched->entity);

	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	if (chan->sched)
		nouveau_sched_destroy(&chan->sched);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nvif_device_dtor(&abi16->device);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

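/* Older DMA-mode channels have no IB ring; only report NV50_DMA_IB_MAX when
 * none of the DMA channel classes is supported (i.e. the device uses IB-mode
 * channels).
 */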
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}

int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
	struct nvkm_gr *gr = nvxx_gr(device);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
		getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
		break;
	case NOUVEAU_GETPARAM_VRAM_USED: {
		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
		break;
	}
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm wasn't initialized until now disable it completely to prevent
	 * userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	device = &abi16->device;
	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR    ; break;
			case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
			case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
			case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
			case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE    ; break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	/* If we're not using the VM_BIND uAPI, we don't need a scheduler.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	if (nouveau_cli_uvmm(cli)) {
		ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
					   chan->chan->dma.ib_max);
		if (ret)
			goto done;
	}

	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Workaround "nvc0" gallium driver using classes it doesn't allocate on
	 * Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
	 * channel init, now we know what that stuff actually is.
	 *
	 * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

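/* Look up the ABI16 channel wrapper matching a userspace channel id. */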
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

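/* Filter nvif ioctls submitted by ABI16 clients: only object creation, method
 * calls and class enumeration are permitted, and the target object is resolved
 * from the channel token (or the default device object when the token is ~0).
 */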
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16 *abi16;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		switch (args->v0.type) {
		case NVIF_IOCTL_V0_NEW:
		case NVIF_IOCTL_V0_MTHD:
		case NVIF_IOCTL_V0_SCLASS:
			break;
		default:
			return -EACCES;
		}
	} else
		return ret;

	if (!(abi16 = nouveau_abi16(file_priv)))
		return -ENOMEM;

	if (args->v0.token != ~0ULL) {
		if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
			return -EINVAL;
		args->v0.object = nvif_handle(&chan->chan->user);
		args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
		return 0;
	}

	args->v0.object = nvif_handle(&abi16->device.object);
	args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
	return 0;
}

int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_client *client;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device = &abi16->device;
	struct nvif_client *client;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}