v4.6
   1/*
   2 * Copyright 2012 Red Hat Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Ben Skeggs
  23 */
  24
  25#include <linux/apple-gmux.h>
  26#include <linux/console.h>
  27#include <linux/delay.h>
  28#include <linux/module.h>
  29#include <linux/pci.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/vgaarb.h>
  32#include <linux/vga_switcheroo.h>
  33
  34#include "drmP.h"
  35#include "drm_crtc_helper.h"
  36
  37#include <core/gpuobj.h>
  38#include <core/option.h>
  39#include <core/pci.h>
  40#include <core/tegra.h>
  41
  42#include <nvif/class.h>
  43#include <nvif/cl0002.h>
  44#include <nvif/cla06f.h>
  45#include <nvif/if0004.h>
  46
  47#include "nouveau_drm.h"
  48#include "nouveau_dma.h"
  49#include "nouveau_ttm.h"
  50#include "nouveau_gem.h"
  51#include "nouveau_vga.h"
  52#include "nouveau_hwmon.h"
  53#include "nouveau_acpi.h"
  54#include "nouveau_bios.h"
  55#include "nouveau_ioctl.h"
  56#include "nouveau_abi16.h"
  57#include "nouveau_fbcon.h"
  58#include "nouveau_fence.h"
  59#include "nouveau_debugfs.h"
  60#include "nouveau_usif.h"
  61#include "nouveau_connector.h"
  62#include "nouveau_platform.h"
  63
  64MODULE_PARM_DESC(config, "option string to pass to driver core");
  65static char *nouveau_config;
  66module_param_named(config, nouveau_config, charp, 0400);
  67
  68MODULE_PARM_DESC(debug, "debug string to pass to driver core");
  69static char *nouveau_debug;
  70module_param_named(debug, nouveau_debug, charp, 0400);
  71
  72MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
  73static int nouveau_noaccel = 0;
  74module_param_named(noaccel, nouveau_noaccel, int, 0400);
  75
  76MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
  77		          "0 = disabled, 1 = enabled, 2 = headless)");
  78int nouveau_modeset = -1;
  79module_param_named(modeset, nouveau_modeset, int, 0400);
  80
  81MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
  82int nouveau_runtime_pm = -1;
  83module_param_named(runpm, nouveau_runtime_pm, int, 0400);
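/* Illustrative usage note (not part of the file itself): the options declared
 * above can be given at module load time, e.g. "modprobe nouveau modeset=1
 * runpm=0", or on the kernel command line as "nouveau.modeset=1
 * nouveau.runpm=0".
 */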
  84
  85static struct drm_driver driver_stub;
  86static struct drm_driver driver_pci;
  87static struct drm_driver driver_platform;
  88
  89static u64
  90nouveau_pci_name(struct pci_dev *pdev)
  91{
  92	u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
  93	name |= pdev->bus->number << 16;
  94	name |= PCI_SLOT(pdev->devfn) << 8;
  95	return name | PCI_FUNC(pdev->devfn);
  96}
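/* Worked example (illustrative) of the packing above: a GPU at PCI address
 * 0000:01:00.0 (domain 0, bus 0x01, slot 0x00, function 0) yields
 * (0ULL << 32) | (0x01 << 16) | (0x00 << 8) | 0x0 = 0x10000.
 */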
  97
  98static u64
  99nouveau_platform_name(struct platform_device *platformdev)
 100{
 101	return platformdev->id;
 102}
 103
 104static u64
 105nouveau_name(struct drm_device *dev)
 106{
 107	if (dev->pdev)
 108		return nouveau_pci_name(dev->pdev);
 109	else
 110		return nouveau_platform_name(dev->platformdev);
 111}
 112
 113static int
 114nouveau_cli_create(struct drm_device *dev, const char *sname,
 115		   int size, void **pcli)
 116{
 117	struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
 118	int ret;
 119	if (cli) {
 120		snprintf(cli->name, sizeof(cli->name), "%s", sname);
 121		cli->dev = dev;
 122
 123		ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
 124				       nouveau_config, nouveau_debug,
 125				       &cli->base);
 126		if (ret == 0) {
 127			mutex_init(&cli->mutex);
 128			usif_client_init(cli);
 129		}
 130		return ret;
 131	}
 132	return -ENOMEM;
 133}
 134
 135static void
 136nouveau_cli_destroy(struct nouveau_cli *cli)
 137{
 138	nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
 139	nvif_client_fini(&cli->base);
 140	usif_client_fini(cli);
 141	kfree(cli);
 142}
 143
 144static void
 145nouveau_accel_fini(struct nouveau_drm *drm)
 146{
 147	nouveau_channel_idle(drm->channel);
 148	nvif_object_fini(&drm->ntfy);
 149	nvkm_gpuobj_del(&drm->notify);
 150	nvif_notify_fini(&drm->flip);
 151	nvif_object_fini(&drm->nvsw);
 152	nouveau_channel_del(&drm->channel);
 153
 154	nouveau_channel_idle(drm->cechan);
 155	nvif_object_fini(&drm->ttm.copy);
 156	nouveau_channel_del(&drm->cechan);
 157
 158	if (drm->fence)
 159		nouveau_fence(drm)->dtor(drm);
 160}
 161
 162static void
 163nouveau_accel_init(struct nouveau_drm *drm)
 164{
 165	struct nvif_device *device = &drm->device;
 166	struct nvif_sclass *sclass;
 167	u32 arg0, arg1;
 168	int ret, i, n;
 169
 170	if (nouveau_noaccel)
 171		return;
 172
 173	/* initialise synchronisation routines */
 174	/*XXX: this is crap, but the fence/channel stuff is a little
 175	 *     backwards in some places.  this will be fixed.
 176	 */
 177	ret = n = nvif_object_sclass_get(&device->object, &sclass);
 178	if (ret < 0)
 179		return;
 180
 181	for (ret = -ENOSYS, i = 0; i < n; i++) {
 182		switch (sclass[i].oclass) {
 183		case NV03_CHANNEL_DMA:
 184			ret = nv04_fence_create(drm);
 185			break;
 186		case NV10_CHANNEL_DMA:
 187			ret = nv10_fence_create(drm);
 188			break;
 189		case NV17_CHANNEL_DMA:
 190		case NV40_CHANNEL_DMA:
 191			ret = nv17_fence_create(drm);
 192			break;
 193		case NV50_CHANNEL_GPFIFO:
 194			ret = nv50_fence_create(drm);
 195			break;
 196		case G82_CHANNEL_GPFIFO:
 197			ret = nv84_fence_create(drm);
 198			break;
 199		case FERMI_CHANNEL_GPFIFO:
 200		case KEPLER_CHANNEL_GPFIFO_A:
 201		case KEPLER_CHANNEL_GPFIFO_B:
 202		case MAXWELL_CHANNEL_GPFIFO_A:
 203			ret = nvc0_fence_create(drm);
 204			break;
 205		default:
 206			break;
 207		}
 208	}
 209
 210	nvif_object_sclass_put(&sclass);
 211	if (ret) {
 212		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
 213		nouveau_accel_fini(drm);
 214		return;
 215	}
 216
 217	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 218		ret = nouveau_channel_new(drm, &drm->device,
 219					  NVA06F_V0_ENGINE_CE0 |
 220					  NVA06F_V0_ENGINE_CE1,
 221					  0, &drm->cechan);
 222		if (ret)
 223			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 224
 225		arg0 = NVA06F_V0_ENGINE_GR;
 226		arg1 = 1;
 227	} else
 228	if (device->info.chipset >= 0xa3 &&
 229	    device->info.chipset != 0xaa &&
 230	    device->info.chipset != 0xac) {
 231		ret = nouveau_channel_new(drm, &drm->device,
 232					  NvDmaFB, NvDmaTT, &drm->cechan);
 233		if (ret)
 234			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 235
 236		arg0 = NvDmaFB;
 237		arg1 = NvDmaTT;
 238	} else {
 239		arg0 = NvDmaFB;
 240		arg1 = NvDmaTT;
 241	}
 242
 243	ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel);
 244	if (ret) {
 245		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
 246		nouveau_accel_fini(drm);
 247		return;
 248	}
 249
 250	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
 251			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
 252	if (ret == 0) {
 253		ret = RING_SPACE(drm->channel, 2);
 254		if (ret == 0) {
 255			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
 256				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
 257				OUT_RING  (drm->channel, NVDRM_NVSW);
 258			} else
 259			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
 260				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
 261				OUT_RING  (drm->channel, 0x001f0000);
 262			}
 263		}
 264
 265		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
 266				       false, NV04_NVSW_NTFY_UEVENT,
 267				       NULL, 0, 0, &drm->flip);
 268		if (ret == 0)
 269			ret = nvif_notify_get(&drm->flip);
 270		if (ret) {
 271			nouveau_accel_fini(drm);
 272			return;
 273		}
 274	}
 275
 276	if (ret) {
 277		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
 278		nouveau_accel_fini(drm);
 279		return;
 280	}
 281
 282	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
 283		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
 284				      NULL, &drm->notify);
 285		if (ret) {
 286			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 287			nouveau_accel_fini(drm);
 288			return;
 289		}
 290
 291		ret = nvif_object_init(&drm->channel->user, NvNotify0,
 292				       NV_DMA_IN_MEMORY,
 293				       &(struct nv_dma_v0) {
 294						.target = NV_DMA_V0_TARGET_VRAM,
 295						.access = NV_DMA_V0_ACCESS_RDWR,
 296						.start = drm->notify->addr,
 297						.limit = drm->notify->addr + 31
 298				       }, sizeof(struct nv_dma_v0),
 299				       &drm->ntfy);
 300		if (ret) {
 301			nouveau_accel_fini(drm);
 302			return;
 303		}
 304	}
 305
 306
 307	nouveau_bo_move_init(drm);
 308}
 309
 310static int nouveau_drm_probe(struct pci_dev *pdev,
 311			     const struct pci_device_id *pent)
 312{
 313	struct nvkm_device *device;
 314	struct apertures_struct *aper;
 315	bool boot = false;
 316	int ret;
 317
 318	/*
 319	 * apple-gmux is needed on dual GPU MacBook Pro
 320	 * to probe the panel if we're the inactive GPU.
 321	 */
 322	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
 323	    apple_gmux_present() && pdev != vga_default_device() &&
 324	    !vga_switcheroo_handler_flags())
 325		return -EPROBE_DEFER;
 326
 327	/* remove conflicting drivers (vesafb, efifb etc) */
 328	aper = alloc_apertures(3);
 329	if (!aper)
 330		return -ENOMEM;
 331
 332	aper->ranges[0].base = pci_resource_start(pdev, 1);
 333	aper->ranges[0].size = pci_resource_len(pdev, 1);
 334	aper->count = 1;
 335
 336	if (pci_resource_len(pdev, 2)) {
 337		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
 338		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
 339		aper->count++;
 340	}
 341
 342	if (pci_resource_len(pdev, 3)) {
 343		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
 344		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
 345		aper->count++;
 346	}
 347
 348#ifdef CONFIG_X86
 349	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 350#endif
 351	if (nouveau_modeset != 2)
 352		remove_conflicting_framebuffers(aper, "nouveaufb", boot);
 353	kfree(aper);
 354
 355	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
 356				  true, true, ~0ULL, &device);
 357	if (ret)
 358		return ret;
 359
 360	pci_set_master(pdev);
 361
 362	ret = drm_get_pci_dev(pdev, pent, &driver_pci);
 363	if (ret) {
 364		nvkm_device_del(&device);
 365		return ret;
 366	}
 367
 368	return 0;
 369}
 370
 371#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
 372
 373static void
 374nouveau_get_hdmi_dev(struct nouveau_drm *drm)
 375{
 376	struct pci_dev *pdev = drm->dev->pdev;
 377
 378	if (!pdev) {
 379		NV_DEBUG(drm, "not a PCI device; no HDMI\n");
 380		drm->hdmi_device = NULL;
 381		return;
 382	}
 383
 384	/* subfunction one is a hdmi audio device? */
 385	drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
 386						PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
 387
 388	if (!drm->hdmi_device) {
 389		NV_DEBUG(drm, "hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
 390		return;
 391	}
 392
 393	if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
 394		NV_DEBUG(drm, "possible hdmi device not audio %d\n", drm->hdmi_device->class);
 395		pci_dev_put(drm->hdmi_device);
 396		drm->hdmi_device = NULL;
 397		return;
 398	}
 399}
 400
 401static int
 402nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 403{
 404	struct nouveau_drm *drm;
 405	int ret;
 406
 407	ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
 408	if (ret)
 409		return ret;
 410
 411	dev->dev_private = drm;
 412	drm->dev = dev;
 413	nvxx_client(&drm->client.base)->debug =
 414		nvkm_dbgopt(nouveau_debug, "DRM");
 415
 416	INIT_LIST_HEAD(&drm->clients);
 417	spin_lock_init(&drm->tile.lock);
 418
 419	nouveau_get_hdmi_dev(drm);
 420
 421	ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE,
 422			       &(struct nv_device_v0) {
 423					.device = ~0,
 424			       }, sizeof(struct nv_device_v0),
 425			       &drm->device);
 426	if (ret)
 427		goto fail_device;
 428
 429	dev->irq_enabled = true;
 430
 431	/* workaround an odd issue on nvc1 by disabling the device's
 432	 * nosnoop capability.  hopefully won't cause issues until a
 433	 * better fix is found - assuming there is one...
 434	 */
 435	if (drm->device.info.chipset == 0xc1)
 436		nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
 437
 438	nouveau_vga_init(drm);
 439
 440	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 441		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
 442				  0x1000, NULL, &drm->client.vm);
 443		if (ret)
 444			goto fail_device;
 445
 446		nvxx_client(&drm->client.base)->vm = drm->client.vm;
 447	}
 448
 449	ret = nouveau_ttm_init(drm);
 450	if (ret)
 451		goto fail_ttm;
 452
 453	ret = nouveau_bios_init(dev);
 454	if (ret)
 455		goto fail_bios;
 456
 457	ret = nouveau_display_create(dev);
 458	if (ret)
 459		goto fail_dispctor;
 460
 461	if (dev->mode_config.num_crtc) {
 462		ret = nouveau_display_init(dev);
 463		if (ret)
 464			goto fail_dispinit;
 465	}
 466
 467	nouveau_debugfs_init(drm);
 468	nouveau_hwmon_init(dev);
 469	nouveau_accel_init(drm);
 470	nouveau_fbcon_init(dev);
 471
 472	if (nouveau_runtime_pm != 0) {
 473		pm_runtime_use_autosuspend(dev->dev);
 474		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 475		pm_runtime_set_active(dev->dev);
 476		pm_runtime_allow(dev->dev);
 477		pm_runtime_mark_last_busy(dev->dev);
 478		pm_runtime_put(dev->dev);
 479	}
 480	return 0;
 481
 482fail_dispinit:
 483	nouveau_display_destroy(dev);
 484fail_dispctor:
 485	nouveau_bios_takedown(dev);
 486fail_bios:
 487	nouveau_ttm_fini(drm);
 488fail_ttm:
 489	nouveau_vga_fini(drm);
 490fail_device:
 491	nvif_device_fini(&drm->device);
 492	nouveau_cli_destroy(&drm->client);
 493	return ret;
 494}
 495
 496static int
 497nouveau_drm_unload(struct drm_device *dev)
 498{
 499	struct nouveau_drm *drm = nouveau_drm(dev);
 500
 501	pm_runtime_get_sync(dev->dev);
 502	nouveau_fbcon_fini(dev);
 503	nouveau_accel_fini(drm);
 504	nouveau_hwmon_fini(dev);
 505	nouveau_debugfs_fini(drm);
 506
 507	if (dev->mode_config.num_crtc)
 508		nouveau_display_fini(dev);
 509	nouveau_display_destroy(dev);
 510
 511	nouveau_bios_takedown(dev);
 512
 513	nouveau_ttm_fini(drm);
 514	nouveau_vga_fini(drm);
 515
 516	nvif_device_fini(&drm->device);
 517	if (drm->hdmi_device)
 518		pci_dev_put(drm->hdmi_device);
 519	nouveau_cli_destroy(&drm->client);
 520	return 0;
 521}
 522
 523void
 524nouveau_drm_device_remove(struct drm_device *dev)
 525{
 526	struct nouveau_drm *drm = nouveau_drm(dev);
 527	struct nvkm_client *client;
 528	struct nvkm_device *device;
 529
 530	dev->irq_enabled = false;
 531	client = nvxx_client(&drm->client.base);
 532	device = nvkm_device_find(client->device);
 533	drm_put_dev(dev);
 534
 535	nvkm_device_del(&device);
 536}
 537
 538static void
 539nouveau_drm_remove(struct pci_dev *pdev)
 540{
 541	struct drm_device *dev = pci_get_drvdata(pdev);
 542
 543	nouveau_drm_device_remove(dev);
 544}
 545
 546static int
 547nouveau_do_suspend(struct drm_device *dev, bool runtime)
 548{
 549	struct nouveau_drm *drm = nouveau_drm(dev);
 550	struct nouveau_cli *cli;
 551	int ret;
 552
 553	if (dev->mode_config.num_crtc) {
 554		NV_INFO(drm, "suspending console...\n");
 555		nouveau_fbcon_set_suspend(dev, 1);
 556		NV_INFO(drm, "suspending display...\n");
 557		ret = nouveau_display_suspend(dev, runtime);
 558		if (ret)
 559			return ret;
 560	}
 561
 562	NV_INFO(drm, "evicting buffers...\n");
 563	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 564
 565	NV_INFO(drm, "waiting for kernel channels to go idle...\n");
 566	if (drm->cechan) {
 567		ret = nouveau_channel_idle(drm->cechan);
 568		if (ret)
 569			goto fail_display;
 570	}
 571
 572	if (drm->channel) {
 573		ret = nouveau_channel_idle(drm->channel);
 574		if (ret)
 575			goto fail_display;
 576	}
 577
 578	NV_INFO(drm, "suspending client object trees...\n");
 579	if (drm->fence && nouveau_fence(drm)->suspend) {
 580		if (!nouveau_fence(drm)->suspend(drm)) {
 581			ret = -ENOMEM;
 582			goto fail_display;
 583		}
 584	}
 585
 586	list_for_each_entry(cli, &drm->clients, head) {
 587		ret = nvif_client_suspend(&cli->base);
 588		if (ret)
 589			goto fail_client;
 590	}
 591
 592	NV_INFO(drm, "suspending kernel object tree...\n");
 593	ret = nvif_client_suspend(&drm->client.base);
 594	if (ret)
 595		goto fail_client;
 596
 597	return 0;
 598
 599fail_client:
 600	list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
 601		nvif_client_resume(&cli->base);
 602	}
 603
 604	if (drm->fence && nouveau_fence(drm)->resume)
 605		nouveau_fence(drm)->resume(drm);
 606
 607fail_display:
 608	if (dev->mode_config.num_crtc) {
 609		NV_INFO(drm, "resuming display...\n");
 610		nouveau_display_resume(dev, runtime);
 611	}
 612	return ret;
 613}
 614
 615static int
 616nouveau_do_resume(struct drm_device *dev, bool runtime)
 617{
 618	struct nouveau_drm *drm = nouveau_drm(dev);
 619	struct nouveau_cli *cli;
 620
 621	NV_INFO(drm, "resuming kernel object tree...\n");
 622	nvif_client_resume(&drm->client.base);
 623
 624	NV_INFO(drm, "resuming client object trees...\n");
 625	if (drm->fence && nouveau_fence(drm)->resume)
 626		nouveau_fence(drm)->resume(drm);
 627
 628	list_for_each_entry(cli, &drm->clients, head) {
 629		nvif_client_resume(&cli->base);
 630	}
 631
 632	nouveau_run_vbios_init(dev);
 633
 634	if (dev->mode_config.num_crtc) {
 635		NV_INFO(drm, "resuming display...\n");
 636		nouveau_display_resume(dev, runtime);
 637		NV_INFO(drm, "resuming console...\n");
 638		nouveau_fbcon_set_suspend(dev, 0);
 639	}
 640
 641	return 0;
 642}
 643
 644int
 645nouveau_pmops_suspend(struct device *dev)
 646{
 647	struct pci_dev *pdev = to_pci_dev(dev);
 648	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 649	int ret;
 650
 651	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
 652	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 653		return 0;
 654
 655	ret = nouveau_do_suspend(drm_dev, false);
 656	if (ret)
 657		return ret;
 658
 659	pci_save_state(pdev);
 660	pci_disable_device(pdev);
 661	pci_set_power_state(pdev, PCI_D3hot);
 662	udelay(200);
 663	return 0;
 664}
 665
 666int
 667nouveau_pmops_resume(struct device *dev)
 668{
 669	struct pci_dev *pdev = to_pci_dev(dev);
 670	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 671	int ret;
 672
 673	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
 674	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 675		return 0;
 676
 677	pci_set_power_state(pdev, PCI_D0);
 678	pci_restore_state(pdev);
 679	ret = pci_enable_device(pdev);
 680	if (ret)
 681		return ret;
 682	pci_set_master(pdev);
 683
 684	return nouveau_do_resume(drm_dev, false);
 685}
 686
 687static int
 688nouveau_pmops_freeze(struct device *dev)
 689{
 690	struct pci_dev *pdev = to_pci_dev(dev);
 691	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 692	return nouveau_do_suspend(drm_dev, false);
 693}
 694
 695static int
 696nouveau_pmops_thaw(struct device *dev)
 697{
 698	struct pci_dev *pdev = to_pci_dev(dev);
 699	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 700	return nouveau_do_resume(drm_dev, false);
 701}
 702
 703static int
 704nouveau_pmops_runtime_suspend(struct device *dev)
 705{
 706	struct pci_dev *pdev = to_pci_dev(dev);
 707	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 708	int ret;
 709
 710	if (nouveau_runtime_pm == 0) {
 711		pm_runtime_forbid(dev);
 712		return -EBUSY;
 713	}
 714
 715	/* are we optimus enabled? */
 716	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
 717		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
 718		pm_runtime_forbid(dev);
 719		return -EBUSY;
 720	}
 721
 722	drm_kms_helper_poll_disable(drm_dev);
 723	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 724	nouveau_switcheroo_optimus_dsm();
 725	ret = nouveau_do_suspend(drm_dev, true);
 726	pci_save_state(pdev);
 727	pci_disable_device(pdev);
 728	pci_ignore_hotplug(pdev);
 729	pci_set_power_state(pdev, PCI_D3cold);
 730	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 731	return ret;
 732}
 733
 734static int
 735nouveau_pmops_runtime_resume(struct device *dev)
 736{
 737	struct pci_dev *pdev = to_pci_dev(dev);
 738	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 739	struct nvif_device *device = &nouveau_drm(drm_dev)->device;
 740	int ret;
 741
 742	if (nouveau_runtime_pm == 0)
 743		return -EINVAL;
 744
 745	pci_set_power_state(pdev, PCI_D0);
 746	pci_restore_state(pdev);
 747	ret = pci_enable_device(pdev);
 748	if (ret)
 749		return ret;
 750	pci_set_master(pdev);
 751
 752	ret = nouveau_do_resume(drm_dev, true);
 753	drm_kms_helper_poll_enable(drm_dev);
 754	/* do magic */
 755	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 756	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 757	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 758	return ret;
 759}
 760
 761static int
 762nouveau_pmops_runtime_idle(struct device *dev)
 763{
 764	struct pci_dev *pdev = to_pci_dev(dev);
 765	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 766	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 767	struct drm_crtc *crtc;
 768
 769	if (nouveau_runtime_pm == 0) {
 770		pm_runtime_forbid(dev);
 771		return -EBUSY;
 772	}
 773
 774	/* are we optimus enabled? */
 775	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
 776		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
 777		pm_runtime_forbid(dev);
 778		return -EBUSY;
 779	}
 780
 781	/* if we have a hdmi audio device - make sure it has a driver loaded */
 782	if (drm->hdmi_device) {
 783		if (!drm->hdmi_device->driver) {
 784			DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
 785			pm_runtime_mark_last_busy(dev);
 786			return -EBUSY;
 787		}
 788	}
 789
 790	list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
 791		if (crtc->enabled) {
 792			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
 793			return -EBUSY;
 794		}
 795	}
 796	pm_runtime_mark_last_busy(dev);
 797	pm_runtime_autosuspend(dev);
 798	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
 799	return 1;
 800}
 801
 802static int
 803nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 804{
 805	struct nouveau_drm *drm = nouveau_drm(dev);
 806	struct nouveau_cli *cli;
 807	char name[32], tmpname[TASK_COMM_LEN];
 808	int ret;
 809
 810	/* need to bring up power immediately if opening device */
 811	ret = pm_runtime_get_sync(dev->dev);
 812	if (ret < 0 && ret != -EACCES)
 813		return ret;
 814
 815	get_task_comm(tmpname, current);
 816	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 817
 818	ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
 819
 820	if (ret)
 821		goto out_suspend;
 822
 823	cli->base.super = false;
 824
 825	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 826		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
 827				  0x1000, NULL, &cli->vm);
 828		if (ret) {
 829			nouveau_cli_destroy(cli);
 830			goto out_suspend;
 831		}
 832
 833		nvxx_client(&cli->base)->vm = cli->vm;
 834	}
 835
 836	fpriv->driver_priv = cli;
 837
 838	mutex_lock(&drm->client.mutex);
 839	list_add(&cli->head, &drm->clients);
 840	mutex_unlock(&drm->client.mutex);
 841
 842out_suspend:
 843	pm_runtime_mark_last_busy(dev->dev);
 844	pm_runtime_put_autosuspend(dev->dev);
 845
 846	return ret;
 847}
 848
 849static void
 850nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 851{
 852	struct nouveau_cli *cli = nouveau_cli(fpriv);
 853	struct nouveau_drm *drm = nouveau_drm(dev);
 854
 855	pm_runtime_get_sync(dev->dev);
 856
 857	mutex_lock(&cli->mutex);
 858	if (cli->abi16)
 859		nouveau_abi16_fini(cli->abi16);
 860	mutex_unlock(&cli->mutex);
 861
 862	mutex_lock(&drm->client.mutex);
 863	list_del(&cli->head);
 864	mutex_unlock(&drm->client.mutex);
 865
 866}
 867
 868static void
 869nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
 870{
 871	struct nouveau_cli *cli = nouveau_cli(fpriv);
 872	nouveau_cli_destroy(cli);
 873	pm_runtime_mark_last_busy(dev->dev);
 874	pm_runtime_put_autosuspend(dev->dev);
 875}
 876
 877static const struct drm_ioctl_desc
 878nouveau_ioctls[] = {
 879	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
 880	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 881	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
 882	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
 883	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
 884	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
 885	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
 886	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
 887	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
 888	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
 889	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
 890	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
 891};
 892
 893long
 894nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 895{
 896	struct drm_file *filp = file->private_data;
 897	struct drm_device *dev = filp->minor->dev;
 898	long ret;
 899
 900	ret = pm_runtime_get_sync(dev->dev);
 901	if (ret < 0 && ret != -EACCES)
 902		return ret;
 903
 904	switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
 905	case DRM_NOUVEAU_NVIF:
 906		ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
 907		break;
 908	default:
 909		ret = drm_ioctl(file, cmd, arg);
 910		break;
 911	}
 912
 913	pm_runtime_mark_last_busy(dev->dev);
 914	pm_runtime_put_autosuspend(dev->dev);
 915	return ret;
 916}
 917
 918static const struct file_operations
 919nouveau_driver_fops = {
 920	.owner = THIS_MODULE,
 921	.open = drm_open,
 922	.release = drm_release,
 923	.unlocked_ioctl = nouveau_drm_ioctl,
 924	.mmap = nouveau_ttm_mmap,
 925	.poll = drm_poll,
 926	.read = drm_read,
 927#if defined(CONFIG_COMPAT)
 928	.compat_ioctl = nouveau_compat_ioctl,
 929#endif
 930	.llseek = noop_llseek,
 931};
 932
 933static struct drm_driver
 934driver_stub = {
 935	.driver_features =
 936		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
 937		DRIVER_KMS_LEGACY_CONTEXT,
 938
 939	.load = nouveau_drm_load,
 940	.unload = nouveau_drm_unload,
 941	.open = nouveau_drm_open,
 942	.preclose = nouveau_drm_preclose,
 943	.postclose = nouveau_drm_postclose,
 944	.lastclose = nouveau_vga_lastclose,
 945
 946#if defined(CONFIG_DEBUG_FS)
 947	.debugfs_init = nouveau_drm_debugfs_init,
 948	.debugfs_cleanup = nouveau_drm_debugfs_cleanup,
 949#endif
 950
 951	.get_vblank_counter = drm_vblank_no_hw_counter,
 952	.enable_vblank = nouveau_display_vblank_enable,
 953	.disable_vblank = nouveau_display_vblank_disable,
 954	.get_scanout_position = nouveau_display_scanoutpos,
 955	.get_vblank_timestamp = nouveau_display_vblstamp,
 956
 957	.ioctls = nouveau_ioctls,
 958	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
 959	.fops = &nouveau_driver_fops,
 960
 961	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 962	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 963	.gem_prime_export = drm_gem_prime_export,
 964	.gem_prime_import = drm_gem_prime_import,
 965	.gem_prime_pin = nouveau_gem_prime_pin,
 966	.gem_prime_res_obj = nouveau_gem_prime_res_obj,
 967	.gem_prime_unpin = nouveau_gem_prime_unpin,
 968	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
 969	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
 970	.gem_prime_vmap = nouveau_gem_prime_vmap,
 971	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 972
 973	.gem_free_object = nouveau_gem_object_del,
 974	.gem_open_object = nouveau_gem_object_open,
 975	.gem_close_object = nouveau_gem_object_close,
 976
 977	.dumb_create = nouveau_display_dumb_create,
 978	.dumb_map_offset = nouveau_display_dumb_map_offset,
 979	.dumb_destroy = drm_gem_dumb_destroy,
 980
 981	.name = DRIVER_NAME,
 982	.desc = DRIVER_DESC,
 983#ifdef GIT_REVISION
 984	.date = GIT_REVISION,
 985#else
 986	.date = DRIVER_DATE,
 987#endif
 988	.major = DRIVER_MAJOR,
 989	.minor = DRIVER_MINOR,
 990	.patchlevel = DRIVER_PATCHLEVEL,
 991};
 992
 993static struct pci_device_id
 994nouveau_drm_pci_table[] = {
 995	{
 996		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
 997		.class = PCI_BASE_CLASS_DISPLAY << 16,
 998		.class_mask  = 0xff << 16,
 999	},
1000	{
1001		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
1002		.class = PCI_BASE_CLASS_DISPLAY << 16,
1003		.class_mask  = 0xff << 16,
1004	},
1005	{}
1006};
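/* Note (editorial, grounded in the table above): matching is done on PCI
 * class rather than on individual device IDs, so any NVIDIA (or
 * NVIDIA/SGS-Thomson) device whose base class is "display controller" binds
 * to this driver; class_mask 0xff << 16 restricts the comparison to the
 * base-class byte of the 24-bit class code.
 */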
1007
1008static void nouveau_display_options(void)
1009{
1010	DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
1011
1012	DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
1013	DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
1014	DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
1015	DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
1016	DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
1017	DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
1018	DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
1019	DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
1020	DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
1021	DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
1022}
1023
1024static const struct dev_pm_ops nouveau_pm_ops = {
1025	.suspend = nouveau_pmops_suspend,
1026	.resume = nouveau_pmops_resume,
1027	.freeze = nouveau_pmops_freeze,
1028	.thaw = nouveau_pmops_thaw,
1029	.poweroff = nouveau_pmops_freeze,
1030	.restore = nouveau_pmops_resume,
1031	.runtime_suspend = nouveau_pmops_runtime_suspend,
1032	.runtime_resume = nouveau_pmops_runtime_resume,
1033	.runtime_idle = nouveau_pmops_runtime_idle,
1034};
1035
1036static struct pci_driver
1037nouveau_drm_pci_driver = {
1038	.name = "nouveau",
1039	.id_table = nouveau_drm_pci_table,
1040	.probe = nouveau_drm_probe,
1041	.remove = nouveau_drm_remove,
1042	.driver.pm = &nouveau_pm_ops,
1043};
1044
1045struct drm_device *
1046nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
1047			       struct platform_device *pdev,
1048			       struct nvkm_device **pdevice)
1049{
1050	struct drm_device *drm;
1051	int err;
1052
1053	err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
1054				    true, true, ~0ULL, pdevice);
1055	if (err)
1056		goto err_free;
1057
1058	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
1059	if (!drm) {
1060		err = -ENOMEM;
1061		goto err_free;
1062	}
1063
1064	drm->platformdev = pdev;
1065	platform_set_drvdata(pdev, drm);
1066
1067	return drm;
1068
1069err_free:
1070	nvkm_device_del(pdevice);
1071
1072	return ERR_PTR(err);
1073}
1074
1075static int __init
1076nouveau_drm_init(void)
1077{
1078	driver_pci = driver_stub;
1079	driver_pci.set_busid = drm_pci_set_busid;
1080	driver_platform = driver_stub;
1081	driver_platform.set_busid = drm_platform_set_busid;
1082
1083	nouveau_display_options();
1084
1085	if (nouveau_modeset == -1) {
1086#ifdef CONFIG_VGA_CONSOLE
1087		if (vgacon_text_force())
1088			nouveau_modeset = 0;
1089#endif
1090	}
1091
1092	if (!nouveau_modeset)
1093		return 0;
1094
1095#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
1096	platform_driver_register(&nouveau_platform_driver);
1097#endif
1098
1099	nouveau_register_dsm_handler();
1100	return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
1101}
1102
1103static void __exit
1104nouveau_drm_exit(void)
1105{
1106	if (!nouveau_modeset)
1107		return;
1108
1109	drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
1110	nouveau_unregister_dsm_handler();
1111
1112#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
1113	platform_driver_unregister(&nouveau_platform_driver);
1114#endif
1115}
1116
1117module_init(nouveau_drm_init);
1118module_exit(nouveau_drm_exit);
1119
1120MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
1121MODULE_AUTHOR(DRIVER_AUTHOR);
1122MODULE_DESCRIPTION(DRIVER_DESC);
1123MODULE_LICENSE("GPL and additional rights");
v5.4
   1/*
   2 * Copyright 2012 Red Hat Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Ben Skeggs
  23 */
  24
  25#include <linux/console.h>
  26#include <linux/delay.h>
  27#include <linux/module.h>
  28#include <linux/pci.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/vga_switcheroo.h>
  31#include <linux/mmu_notifier.h>
  32
  33#include <drm/drm_crtc_helper.h>
  34#include <drm/drm_ioctl.h>
  35#include <drm/drm_vblank.h>
  36
  37#include <core/gpuobj.h>
  38#include <core/option.h>
  39#include <core/pci.h>
  40#include <core/tegra.h>
  41
  42#include <nvif/driver.h>
  43#include <nvif/fifo.h>
  44#include <nvif/user.h>
  45
  46#include <nvif/class.h>
  47#include <nvif/cl0002.h>
  48#include <nvif/cla06f.h>
  49
  50#include "nouveau_drv.h"
  51#include "nouveau_dma.h"
  52#include "nouveau_ttm.h"
  53#include "nouveau_gem.h"
  54#include "nouveau_vga.h"
  55#include "nouveau_led.h"
  56#include "nouveau_hwmon.h"
  57#include "nouveau_acpi.h"
  58#include "nouveau_bios.h"
  59#include "nouveau_ioctl.h"
  60#include "nouveau_abi16.h"
  61#include "nouveau_fbcon.h"
  62#include "nouveau_fence.h"
  63#include "nouveau_debugfs.h"
  64#include "nouveau_usif.h"
  65#include "nouveau_connector.h"
  66#include "nouveau_platform.h"
  67#include "nouveau_svm.h"
  68#include "nouveau_dmem.h"
  69
  70MODULE_PARM_DESC(config, "option string to pass to driver core");
  71static char *nouveau_config;
  72module_param_named(config, nouveau_config, charp, 0400);
  73
  74MODULE_PARM_DESC(debug, "debug string to pass to driver core");
  75static char *nouveau_debug;
  76module_param_named(debug, nouveau_debug, charp, 0400);
  77
  78MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
  79static int nouveau_noaccel = 0;
  80module_param_named(noaccel, nouveau_noaccel, int, 0400);
  81
  82MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
  83		          "0 = disabled, 1 = enabled, 2 = headless)");
  84int nouveau_modeset = -1;
  85module_param_named(modeset, nouveau_modeset, int, 0400);
  86
  87MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
  88static int nouveau_atomic = 0;
  89module_param_named(atomic, nouveau_atomic, int, 0400);
  90
  91MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
  92static int nouveau_runtime_pm = -1;
  93module_param_named(runpm, nouveau_runtime_pm, int, 0400);
  94
  95static struct drm_driver driver_stub;
  96static struct drm_driver driver_pci;
  97static struct drm_driver driver_platform;
  98
  99static u64
 100nouveau_pci_name(struct pci_dev *pdev)
 101{
 102	u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
 103	name |= pdev->bus->number << 16;
 104	name |= PCI_SLOT(pdev->devfn) << 8;
 105	return name | PCI_FUNC(pdev->devfn);
 106}
 107
 108static u64
 109nouveau_platform_name(struct platform_device *platformdev)
 110{
 111	return platformdev->id;
 112}
 113
 114static u64
 115nouveau_name(struct drm_device *dev)
 116{
 117	if (dev->pdev)
 118		return nouveau_pci_name(dev->pdev);
 119	else
 120		return nouveau_platform_name(to_platform_device(dev->dev));
 121}
 122
 123static inline bool
 124nouveau_cli_work_ready(struct dma_fence *fence)
 125{
 126	if (!dma_fence_is_signaled(fence))
 127		return false;
 128	dma_fence_put(fence);
 129	return true;
 130}
 131
 132static void
 133nouveau_cli_work(struct work_struct *w)
 134{
 135	struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
 136	struct nouveau_cli_work *work, *wtmp;
 137	mutex_lock(&cli->lock);
 138	list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
 139		if (!work->fence || nouveau_cli_work_ready(work->fence)) {
 140			list_del(&work->head);
 141			work->func(work);
 142		}
 143	}
 144	mutex_unlock(&cli->lock);
 145}
 146
 147static void
 148nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
 149{
 150	struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
 151	schedule_work(&work->cli->work);
 152}
 153
 154void
 155nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
 156		       struct nouveau_cli_work *work)
 157{
 158	work->fence = dma_fence_get(fence);
 159	work->cli = cli;
 160	mutex_lock(&cli->lock);
 161	list_add_tail(&work->head, &cli->worker);
 162	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
 163		nouveau_cli_work_fence(fence, &work->cb);
 164	mutex_unlock(&cli->lock);
 165}
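/* Sketch of how a caller might use the queue helper above (illustrative only,
 * with hypothetical names): embed a struct nouveau_cli_work in a private
 * structure, set ->func to a completion handler, and queue it against a
 * fence; ->func is then invoked from nouveau_cli_work() once the fence has
 * signalled.
 *
 *	struct my_flip_work {
 *		struct nouveau_cli_work base;
 *	};
 *
 *	static void my_flip_done(struct nouveau_cli_work *w) { ... }
 *
 *	work->base.func = my_flip_done;
 *	nouveau_cli_work_queue(cli, fence, &work->base);
 */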
 166
 167static void
 168nouveau_cli_fini(struct nouveau_cli *cli)
 169{
 170	/* All our channels are dead now, which means all the fences they
 171	 * own are signalled, and all callback functions have been called.
 172	 *
 173	 * So, after flushing the workqueue, there should be nothing left.
 174	 */
 175	flush_work(&cli->work);
 176	WARN_ON(!list_empty(&cli->worker));
 177
 178	usif_client_fini(cli);
 179	nouveau_vmm_fini(&cli->svm);
 180	nouveau_vmm_fini(&cli->vmm);
 181	nvif_mmu_fini(&cli->mmu);
 182	nvif_device_fini(&cli->device);
 183	mutex_lock(&cli->drm->master.lock);
 184	nvif_client_fini(&cli->base);
 185	mutex_unlock(&cli->drm->master.lock);
 186}
 187
 188static int
 189nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 190		 struct nouveau_cli *cli)
 191{
 192	static const struct nvif_mclass
 193	mems[] = {
 194		{ NVIF_CLASS_MEM_GF100, -1 },
 195		{ NVIF_CLASS_MEM_NV50 , -1 },
 196		{ NVIF_CLASS_MEM_NV04 , -1 },
 197		{}
 198	};
 199	static const struct nvif_mclass
 200	mmus[] = {
 201		{ NVIF_CLASS_MMU_GF100, -1 },
 202		{ NVIF_CLASS_MMU_NV50 , -1 },
 203		{ NVIF_CLASS_MMU_NV04 , -1 },
 204		{}
 205	};
 206	static const struct nvif_mclass
 207	vmms[] = {
 208		{ NVIF_CLASS_VMM_GP100, -1 },
 209		{ NVIF_CLASS_VMM_GM200, -1 },
 210		{ NVIF_CLASS_VMM_GF100, -1 },
 211		{ NVIF_CLASS_VMM_NV50 , -1 },
 212		{ NVIF_CLASS_VMM_NV04 , -1 },
 213		{}
 214	};
 215	u64 device = nouveau_name(drm->dev);
 216	int ret;
 217
 218	snprintf(cli->name, sizeof(cli->name), "%s", sname);
 219	cli->drm = drm;
 220	mutex_init(&cli->mutex);
 221	usif_client_init(cli);
 222
 223	INIT_WORK(&cli->work, nouveau_cli_work);
 224	INIT_LIST_HEAD(&cli->worker);
 225	mutex_init(&cli->lock);
 226
 227	if (cli == &drm->master) {
 228		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
 229				       cli->name, device, &cli->base);
 230	} else {
 231		mutex_lock(&drm->master.lock);
 232		ret = nvif_client_init(&drm->master.base, cli->name, device,
 233				       &cli->base);
 234		mutex_unlock(&drm->master.lock);
 235	}
 236	if (ret) {
 237		NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
 238		goto done;
 239	}
 240
 241	ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
 242			       &(struct nv_device_v0) {
 243					.device = ~0,
 244			       }, sizeof(struct nv_device_v0),
 245			       &cli->device);
 246	if (ret) {
 247		NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
 248		goto done;
 249	}
 250
 251	ret = nvif_mclass(&cli->device.object, mmus);
 252	if (ret < 0) {
 253		NV_PRINTK(err, cli, "No supported MMU class\n");
 254		goto done;
 255	}
 256
 257	ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
 258	if (ret) {
 259		NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
 260		goto done;
 261	}
 262
 263	ret = nvif_mclass(&cli->mmu.object, vmms);
 264	if (ret < 0) {
 265		NV_PRINTK(err, cli, "No supported VMM class\n");
 266		goto done;
 267	}
 268
 269	ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
 270	if (ret) {
 271		NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
 272		goto done;
 273	}
 274
 275	ret = nvif_mclass(&cli->mmu.object, mems);
 276	if (ret < 0) {
 277		NV_PRINTK(err, cli, "No supported MEM class\n");
 278		goto done;
 279	}
 280
 281	cli->mem = &mems[ret];
 282	return 0;
 283done:
 284	if (ret)
 285		nouveau_cli_fini(cli);
 286	return ret;
 287}
 288
 289static void
 290nouveau_accel_ce_fini(struct nouveau_drm *drm)
 291{
 292	nouveau_channel_idle(drm->cechan);
 293	nvif_object_fini(&drm->ttm.copy);
 294	nouveau_channel_del(&drm->cechan);
 295}
 296
 297static void
 298nouveau_accel_ce_init(struct nouveau_drm *drm)
 299{
 300	struct nvif_device *device = &drm->client.device;
 301	int ret = 0;
 302
 303	/* Allocate channel that has access to a (preferably async) copy
 304	 * engine, to use for TTM buffer moves.
 305	 */
 306	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 307		ret = nouveau_channel_new(drm, device,
 308					  nvif_fifo_runlist_ce(device), 0,
 309					  true, &drm->cechan);
 310	} else
 311	if (device->info.chipset >= 0xa3 &&
 312	    device->info.chipset != 0xaa &&
 313	    device->info.chipset != 0xac) {
 314		/* Prior to Kepler, there's only a single runlist, so all
 315		 * engines can be accessed from any channel.
 316		 *
 317		 * We still want to use a separate channel though.
 318		 */
 319		ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
 320					  &drm->cechan);
 321	}
 322
 323	if (ret)
 324		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 325}
 326
 327static void
 328nouveau_accel_gr_fini(struct nouveau_drm *drm)
 329{
 330	nouveau_channel_idle(drm->channel);
 331	nvif_object_fini(&drm->ntfy);
 332	nvkm_gpuobj_del(&drm->notify);
 333	nvif_object_fini(&drm->nvsw);
 334	nouveau_channel_del(&drm->channel);
 335}
 336
 337static void
 338nouveau_accel_gr_init(struct nouveau_drm *drm)
 339{
 340	struct nvif_device *device = &drm->client.device;
 341	u32 arg0, arg1;
 342	int ret;
 343
 344	/* Allocate channel that has access to the graphics engine. */
 345	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 346		arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
 347		arg1 = 1;
 348	} else {
 349		arg0 = NvDmaFB;
 350		arg1 = NvDmaTT;
 351	}
 352
 353	ret = nouveau_channel_new(drm, device, arg0, arg1, false,
 354				  &drm->channel);
 355	if (ret) {
 356		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
 357		nouveau_accel_gr_fini(drm);
 358		return;
 359	}
 360
 361	/* A SW class is used on pre-NV50 HW to assist with handling the
 362	 * synchronisation of page flips, as well as to implement fences
 363	 * on TNT/TNT2 HW that lacks any kind of support in host.
 364	 */
 365	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
 366		ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
 367				       nouveau_abi16_swclass(drm), NULL, 0,
 368				       &drm->nvsw);
 369		if (ret == 0) {
 370			ret = RING_SPACE(drm->channel, 2);
 371			if (ret == 0) {
 372				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
 373				OUT_RING  (drm->channel, drm->nvsw.handle);
 374			}
 375		}
 376
 377		if (ret) {
 378			NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
 379			nouveau_accel_gr_fini(drm);
 380			return;
 381		}
 382	}
 383
 384	/* NvMemoryToMemoryFormat requires a notifier ctxdma for some reason,
 385	 * even if notification is never requested, so, allocate a ctxdma on
 386	 * any GPU where it's possible we'll end up using M2MF for BO moves.
 387	 */
 388	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
 389		ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
 390				      &drm->notify);
 391		if (ret) {
 392			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 393			nouveau_accel_gr_fini(drm);
 394			return;
 395		}
 396
 397		ret = nvif_object_init(&drm->channel->user, NvNotify0,
 398				       NV_DMA_IN_MEMORY,
 399				       &(struct nv_dma_v0) {
 400						.target = NV_DMA_V0_TARGET_VRAM,
 401						.access = NV_DMA_V0_ACCESS_RDWR,
 402						.start = drm->notify->addr,
 403						.limit = drm->notify->addr + 31
 404				       }, sizeof(struct nv_dma_v0),
 405				       &drm->ntfy);
 406		if (ret) {
 407			nouveau_accel_gr_fini(drm);
 408			return;
 409		}
 410	}
 411}
 412
 413static void
 414nouveau_accel_fini(struct nouveau_drm *drm)
 415{
 416	nouveau_accel_ce_fini(drm);
 417	nouveau_accel_gr_fini(drm);
 418	if (drm->fence)
 419		nouveau_fence(drm)->dtor(drm);
 420}
 421
 422static void
 423nouveau_accel_init(struct nouveau_drm *drm)
 424{
 425	struct nvif_device *device = &drm->client.device;
 426	struct nvif_sclass *sclass;
 427	int ret, i, n;
 428
 429	if (nouveau_noaccel)
 430		return;
 431
 432	/* Initialise global support for channels, and synchronisation. */
 433	ret = nouveau_channels_init(drm);
 434	if (ret)
 435		return;
 436
 437	/*XXX: this is crap, but the fence/channel stuff is a little
 438	 *     backwards in some places.  this will be fixed.
 439	 */
 440	ret = n = nvif_object_sclass_get(&device->object, &sclass);
 441	if (ret < 0)
 442		return;
 443
 444	for (ret = -ENOSYS, i = 0; i < n; i++) {
 445		switch (sclass[i].oclass) {
 446		case NV03_CHANNEL_DMA:
 447			ret = nv04_fence_create(drm);
 448			break;
 449		case NV10_CHANNEL_DMA:
 450			ret = nv10_fence_create(drm);
 451			break;
 452		case NV17_CHANNEL_DMA:
 453		case NV40_CHANNEL_DMA:
 454			ret = nv17_fence_create(drm);
 455			break;
 456		case NV50_CHANNEL_GPFIFO:
 457			ret = nv50_fence_create(drm);
 458			break;
 459		case G82_CHANNEL_GPFIFO:
 460			ret = nv84_fence_create(drm);
 461			break;
 462		case FERMI_CHANNEL_GPFIFO:
 463		case KEPLER_CHANNEL_GPFIFO_A:
 464		case KEPLER_CHANNEL_GPFIFO_B:
 465		case MAXWELL_CHANNEL_GPFIFO_A:
 466		case PASCAL_CHANNEL_GPFIFO_A:
 467		case VOLTA_CHANNEL_GPFIFO_A:
 468		case TURING_CHANNEL_GPFIFO_A:
 469			ret = nvc0_fence_create(drm);
 470			break;
 471		default:
 472			break;
 473		}
 474	}
 475
 476	nvif_object_sclass_put(&sclass);
 477	if (ret) {
 478		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
 479		nouveau_accel_fini(drm);
 480		return;
 481	}
 482
 483	/* Volta requires access to a doorbell register for kickoff. */
 484	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
 485		ret = nvif_user_init(device);
 486		if (ret)
 487			return;
 488	}
 489
 490	/* Allocate channels we need to support various functions. */
 491	nouveau_accel_gr_init(drm);
 492	nouveau_accel_ce_init(drm);
 493
 494	/* Initialise accelerated TTM buffer moves. */
 495	nouveau_bo_move_init(drm);
 496}
 497
 498static int
 499nouveau_drm_device_init(struct drm_device *dev)
 500{
 501	struct nouveau_drm *drm;
 502	int ret;
 503
 504	if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
 505		return -ENOMEM;
 506	dev->dev_private = drm;
 507	drm->dev = dev;
 508
 509	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
 510	if (ret)
 511		goto fail_alloc;
 512
 513	ret = nouveau_cli_init(drm, "DRM", &drm->client);
 514	if (ret)
 515		goto fail_master;
 516
 517	dev->irq_enabled = true;
 518
 519	nvxx_client(&drm->client.base)->debug =
 520		nvkm_dbgopt(nouveau_debug, "DRM");
 521
 522	INIT_LIST_HEAD(&drm->clients);
 523	spin_lock_init(&drm->tile.lock);
 524
 525	/* workaround an odd issue on nvc1 by disabling the device's
 526	 * nosnoop capability.  hopefully won't cause issues until a
 527	 * better fix is found - assuming there is one...
 528	 */
 529	if (drm->client.device.info.chipset == 0xc1)
 530		nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);
 531
 532	nouveau_vga_init(drm);
 533
 534	ret = nouveau_ttm_init(drm);
 535	if (ret)
 536		goto fail_ttm;
 537
 538	ret = nouveau_bios_init(dev);
 539	if (ret)
 540		goto fail_bios;
 541
 542	nouveau_accel_init(drm);
 543
 544	ret = nouveau_display_create(dev);
 545	if (ret)
 546		goto fail_dispctor;
 547
 548	if (dev->mode_config.num_crtc) {
 549		ret = nouveau_display_init(dev, false, false);
 550		if (ret)
 551			goto fail_dispinit;
 552	}
 553
 554	nouveau_debugfs_init(drm);
 555	nouveau_hwmon_init(dev);
 556	nouveau_svm_init(drm);
 557	nouveau_dmem_init(drm);
 558	nouveau_fbcon_init(dev);
 559	nouveau_led_init(dev);
 560
 561	if (nouveau_pmops_runtime()) {
 562		pm_runtime_use_autosuspend(dev->dev);
 563		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 564		pm_runtime_set_active(dev->dev);
 565		pm_runtime_allow(dev->dev);
 566		pm_runtime_mark_last_busy(dev->dev);
 567		pm_runtime_put(dev->dev);
 568	}
 569
 570	return 0;
 571
 572fail_dispinit:
 573	nouveau_display_destroy(dev);
 574fail_dispctor:
 575	nouveau_accel_fini(drm);
 576	nouveau_bios_takedown(dev);
 577fail_bios:
 578	nouveau_ttm_fini(drm);
 579fail_ttm:
 580	nouveau_vga_fini(drm);
 581	nouveau_cli_fini(&drm->client);
 582fail_master:
 583	nouveau_cli_fini(&drm->master);
 584fail_alloc:
 585	kfree(drm);
 586	return ret;
 587}
 588
 589static void
 590nouveau_drm_device_fini(struct drm_device *dev)
 591{
 592	struct nouveau_drm *drm = nouveau_drm(dev);
 593
 594	if (nouveau_pmops_runtime()) {
 595		pm_runtime_get_sync(dev->dev);
 596		pm_runtime_forbid(dev->dev);
 597	}
 598
 599	nouveau_led_fini(dev);
 600	nouveau_fbcon_fini(dev);
 601	nouveau_dmem_fini(drm);
 602	nouveau_svm_fini(drm);
 603	nouveau_hwmon_fini(dev);
 604	nouveau_debugfs_fini(drm);
 605
 606	if (dev->mode_config.num_crtc)
 607		nouveau_display_fini(dev, false, false);
 608	nouveau_display_destroy(dev);
 609
 610	nouveau_accel_fini(drm);
 611	nouveau_bios_takedown(dev);
 612
 613	nouveau_ttm_fini(drm);
 614	nouveau_vga_fini(drm);
 615
 616	nouveau_cli_fini(&drm->client);
 617	nouveau_cli_fini(&drm->master);
 618	kfree(drm);
 619}
 620
 621static int nouveau_drm_probe(struct pci_dev *pdev,
 622			     const struct pci_device_id *pent)
 623{
 624	struct nvkm_device *device;
 625	struct drm_device *drm_dev;
 626	struct apertures_struct *aper;
 627	bool boot = false;
 628	int ret;
 629
 630	if (vga_switcheroo_client_probe_defer(pdev))
 631		return -EPROBE_DEFER;
 632
 633	/* We need to check that the chipset is supported before booting
 634	 * fbdev off the hardware, as there's no way to put it back.
 635	 */
 636	ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
 637				  true, false, 0, &device);
 638	if (ret)
 639		return ret;
 640
 641	nvkm_device_del(&device);
 642
 643	/* Remove conflicting drivers (vesafb, efifb etc). */
 644	aper = alloc_apertures(3);
 645	if (!aper)
 646		return -ENOMEM;
 647
 648	aper->ranges[0].base = pci_resource_start(pdev, 1);
 649	aper->ranges[0].size = pci_resource_len(pdev, 1);
 650	aper->count = 1;
 651
 652	if (pci_resource_len(pdev, 2)) {
 653		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
 654		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
 655		aper->count++;
 656	}
 657
 658	if (pci_resource_len(pdev, 3)) {
 659		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
 660		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
 661		aper->count++;
 662	}
 663
 664#ifdef CONFIG_X86
 665	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 666#endif
 667	if (nouveau_modeset != 2)
 668		drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
 669	kfree(aper);
 670
 671	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
 672				  true, true, ~0ULL, &device);
 673	if (ret)
 674		return ret;
 675
 676	pci_set_master(pdev);
 677
 678	if (nouveau_atomic)
 679		driver_pci.driver_features |= DRIVER_ATOMIC;
 680
 681	drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
 682	if (IS_ERR(drm_dev)) {
 683		ret = PTR_ERR(drm_dev);
 684		goto fail_nvkm;
 685	}
 686
 687	ret = pci_enable_device(pdev);
 688	if (ret)
 689		goto fail_drm;
 690
 691	drm_dev->pdev = pdev;
 692	pci_set_drvdata(pdev, drm_dev);
 693
 694	ret = nouveau_drm_device_init(drm_dev);
 695	if (ret)
 696		goto fail_pci;
 697
 698	ret = drm_dev_register(drm_dev, pent->driver_data);
 699	if (ret)
 700		goto fail_drm_dev_init;
 701
 702	return 0;
 703
 704fail_drm_dev_init:
 705	nouveau_drm_device_fini(drm_dev);
 706fail_pci:
 707	pci_disable_device(pdev);
 708fail_drm:
 709	drm_dev_put(drm_dev);
 710fail_nvkm:
 711	nvkm_device_del(&device);
 712	return ret;
 713}
 714
 715void
 716nouveau_drm_device_remove(struct drm_device *dev)
 717{
 718	struct pci_dev *pdev = dev->pdev;
 719	struct nouveau_drm *drm = nouveau_drm(dev);
 720	struct nvkm_client *client;
 721	struct nvkm_device *device;
 722
 723	drm_dev_unregister(dev);
 724
 725	dev->irq_enabled = false;
 726	client = nvxx_client(&drm->client.base);
 727	device = nvkm_device_find(client->device);
 728
 729	nouveau_drm_device_fini(dev);
 730	pci_disable_device(pdev);
 731	drm_dev_put(dev);
 732	nvkm_device_del(&device);
 733}
 734
 735static void
 736nouveau_drm_remove(struct pci_dev *pdev)
 737{
 738	struct drm_device *dev = pci_get_drvdata(pdev);
 739
 740	nouveau_drm_device_remove(dev);
 741}
 742
 743static int
 744nouveau_do_suspend(struct drm_device *dev, bool runtime)
 745{
 746	struct nouveau_drm *drm = nouveau_drm(dev);
 747	int ret;
 748
 749	nouveau_svm_suspend(drm);
 750	nouveau_dmem_suspend(drm);
 751	nouveau_led_suspend(dev);
 752
 753	if (dev->mode_config.num_crtc) {
 754		NV_DEBUG(drm, "suspending console...\n");
 755		nouveau_fbcon_set_suspend(dev, 1);
 756		NV_DEBUG(drm, "suspending display...\n");
 757		ret = nouveau_display_suspend(dev, runtime);
 758		if (ret)
 759			return ret;
 760	}
 761
 762	NV_DEBUG(drm, "evicting buffers...\n");
 763	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 764
 765	NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
 766	if (drm->cechan) {
 767		ret = nouveau_channel_idle(drm->cechan);
 768		if (ret)
 769			goto fail_display;
 770	}
 771
 772	if (drm->channel) {
 773		ret = nouveau_channel_idle(drm->channel);
 774		if (ret)
 775			goto fail_display;
 776	}
 777
 778	NV_DEBUG(drm, "suspending fence...\n");
 779	if (drm->fence && nouveau_fence(drm)->suspend) {
 780		if (!nouveau_fence(drm)->suspend(drm)) {
 781			ret = -ENOMEM;
 782			goto fail_display;
 783		}
 784	}
 785
 786	NV_DEBUG(drm, "suspending object tree...\n");
 787	ret = nvif_client_suspend(&drm->master.base);
 788	if (ret)
 789		goto fail_client;
 790
 791	return 0;
 792
 793	fail_client:
 794	if (drm->fence && nouveau_fence(drm)->resume)
 795		nouveau_fence(drm)->resume(drm);
 796
 797fail_display:
 798	if (dev->mode_config.num_crtc) {
 799		NV_DEBUG(drm, "resuming display...\n");
 800		nouveau_display_resume(dev, runtime);
 801	}
 802	return ret;
 803}
 804
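/*
 * Common resume path, the reverse of nouveau_do_suspend(): restore the
 * object tree first, then the fence state, replay the VBIOS init tables,
 * and finally bring the display, fbcon, LEDs, DMEM and SVM back up.
 */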
 805static int
 806nouveau_do_resume(struct drm_device *dev, bool runtime)
 807{
 808	int ret = 0;
 809	struct nouveau_drm *drm = nouveau_drm(dev);
 810
 811	NV_DEBUG(drm, "resuming object tree...\n");
 812	ret = nvif_client_resume(&drm->master.base);
 813	if (ret) {
 814		NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
 815		return ret;
 816	}
 817
 818	NV_DEBUG(drm, "resuming fence...\n");
 819	if (drm->fence && nouveau_fence(drm)->resume)
 820		nouveau_fence(drm)->resume(drm);
 821
 822	nouveau_run_vbios_init(dev);
 823
 824	if (dev->mode_config.num_crtc) {
 825		NV_DEBUG(drm, "resuming display...\n");
 826		nouveau_display_resume(dev, runtime);
 827		NV_DEBUG(drm, "resuming console...\n");
 828		nouveau_fbcon_set_suspend(dev, 0);
 829	}
 830
 831	nouveau_led_resume(dev);
 832	nouveau_dmem_resume(drm);
 833	nouveau_svm_resume(drm);
 834	return 0;
 835}
 836
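/*
 * System sleep entry point.  Skipped entirely when vga_switcheroo has
 * already powered the GPU off; otherwise run the common suspend path and
 * put the device into D3hot.
 */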
 837int
 838nouveau_pmops_suspend(struct device *dev)
 839{
 840	struct pci_dev *pdev = to_pci_dev(dev);
 841	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 842	int ret;
 843
 844	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
 845	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 846		return 0;
 847
 848	ret = nouveau_do_suspend(drm_dev, false);
 849	if (ret)
 850		return ret;
 851
 852	pci_save_state(pdev);
 853	pci_disable_device(pdev);
 854	pci_set_power_state(pdev, PCI_D3hot);
 855	udelay(200);
 856	return 0;
 857}
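/*
 * System resume: bring the device back to D0, restore its PCI state, run
 * the common resume path and kick the hotplug worker in case monitors
 * were (dis)connected while the system was asleep.
 */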
 858
 859int
 860nouveau_pmops_resume(struct device *dev)
 861{
 862	struct pci_dev *pdev = to_pci_dev(dev);
 863	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 864	int ret;
 865
 866	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
 867	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 868		return 0;
 869
 870	pci_set_power_state(pdev, PCI_D0);
 871	pci_restore_state(pdev);
 872	ret = pci_enable_device(pdev);
 873	if (ret)
 874		return ret;
 875	pci_set_master(pdev);
 876
 877	ret = nouveau_do_resume(drm_dev, false);
 878
 879	/* Monitors may have been connected / disconnected during suspend */
 880	schedule_work(&nouveau_drm(drm_dev)->hpd_work);
 881
 882	return ret;
 883}
 884
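/* Hibernation freeze/thaw (and poweroff/restore) reuse the common paths. */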
 885static int
 886nouveau_pmops_freeze(struct device *dev)
 887{
 888	struct pci_dev *pdev = to_pci_dev(dev);
 889	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 890	return nouveau_do_suspend(drm_dev, false);
 891}
 892
 893static int
 894nouveau_pmops_thaw(struct device *dev)
 895{
 896	struct pci_dev *pdev = to_pci_dev(dev);
 897	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 898	return nouveau_do_resume(drm_dev, false);
 899}
 900
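/*
 * Whether runtime PM should be used: honour the "runpm" module parameter,
 * defaulting to enabling it only on Optimus/_DSM systems.
 */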
 901bool
 902nouveau_pmops_runtime(void)
 903{
 904	if (nouveau_runtime_pm == -1)
 905		return nouveau_is_optimus() || nouveau_is_v1_dsm();
 906	return nouveau_runtime_pm == 1;
 907}
 908
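/*
 * Runtime suspend: invoke the Optimus _DSM hook, run the common suspend
 * path, then put the device into D3cold with hotplug events ignored.
 */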
 909static int
 910nouveau_pmops_runtime_suspend(struct device *dev)
 911{
 912	struct pci_dev *pdev = to_pci_dev(dev);
 913	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 914	int ret;
 915
 916	if (!nouveau_pmops_runtime()) {
 917		pm_runtime_forbid(dev);
 918		return -EBUSY;
 919	}
 920
 921	nouveau_switcheroo_optimus_dsm();
 922	ret = nouveau_do_suspend(drm_dev, true);
 923	pci_save_state(pdev);
 924	pci_disable_device(pdev);
 925	pci_ignore_hotplug(pdev);
 926	pci_set_power_state(pdev, PCI_D3cold);
 927	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 928	return ret;
 929}
 930
 931static int
 932nouveau_pmops_runtime_resume(struct device *dev)
 933{
 934	struct pci_dev *pdev = to_pci_dev(dev);
 935	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 936	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 937	struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
 938	int ret;
 939
 940	if (!nouveau_pmops_runtime()) {
 941		pm_runtime_forbid(dev);
 942		return -EBUSY;
 943	}
 944
 945	pci_set_power_state(pdev, PCI_D0);
 946	pci_restore_state(pdev);
 947	ret = pci_enable_device(pdev);
 948	if (ret)
 949		return ret;
 950	pci_set_master(pdev);
 951
 952	ret = nouveau_do_resume(drm_dev, true);
 953	if (ret) {
 954		NV_ERROR(drm, "resume failed with: %d\n", ret);
 955		return ret;
 956	}
 957
 958	/* do magic: set an undocumented bit (bit 25 of register 0x088488) */
 959	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 960	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 961
 962	/* Monitors may have been connected / disconnected during suspend */
 963	schedule_work(&nouveau_drm(drm_dev)->hpd_work);
 964
 965	return ret;
 966}
 967
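/*
 * Runtime idle callback: rearm the autosuspend timer rather than letting
 * the PM core suspend the device immediately.
 */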
 968static int
 969nouveau_pmops_runtime_idle(struct device *dev)
 970{
 971	if (!nouveau_pmops_runtime()) {
 972		pm_runtime_forbid(dev);
 973		return -EBUSY;
 974	}
 975
 976	pm_runtime_mark_last_busy(dev);
 977	pm_runtime_autosuspend(dev);
 978	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
 979	return 1;
 980}
 981
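/*
 * Per-open file hook: power up the GPU, allocate a nouveau_cli named
 * after the opening process and add it to the client list.
 */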
 982static int
 983nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 984{
 985	struct nouveau_drm *drm = nouveau_drm(dev);
 986	struct nouveau_cli *cli;
 987	char name[32], tmpname[TASK_COMM_LEN];
 988	int ret;
 989
 990	/* need to bring up power immediately if opening device */
 991	ret = pm_runtime_get_sync(dev->dev);
 992	if (ret < 0 && ret != -EACCES)
 993		return ret;
 994
 995	get_task_comm(tmpname, current);
 996	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 997
 998	if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
 999		ret = -ENOMEM;
1000		goto done;
1001	}
1002
1003	ret = nouveau_cli_init(drm, name, cli);
1004	if (ret)
1005		goto done;
1006
1007	cli->base.super = false;
1008
1009	fpriv->driver_priv = cli;
1010
1011	mutex_lock(&drm->client.mutex);
1012	list_add(&cli->head, &drm->clients);
1013	mutex_unlock(&drm->client.mutex);
1014
1015done:
1016	if (ret && cli) {
1017		nouveau_cli_fini(cli);
1018		kfree(cli);
1019	}
1020
1021	pm_runtime_mark_last_busy(dev->dev);
1022	pm_runtime_put_autosuspend(dev->dev);
1023	return ret;
1024}
1025
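/*
 * Per-close file hook: tear down any ABI16 state, unlink and free the
 * client, then drop the runtime PM reference taken at the top.
 */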
1026static void
1027nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
1028{
1029	struct nouveau_cli *cli = nouveau_cli(fpriv);
1030	struct nouveau_drm *drm = nouveau_drm(dev);
1031
1032	pm_runtime_get_sync(dev->dev);
1033
1034	mutex_lock(&cli->mutex);
1035	if (cli->abi16)
1036		nouveau_abi16_fini(cli->abi16);
1037	mutex_unlock(&cli->mutex);
1038
1039	mutex_lock(&drm->client.mutex);
1040	list_del(&cli->head);
1041	mutex_unlock(&drm->client.mutex);
1042
1043	nouveau_cli_fini(cli);
1044	kfree(cli);
1045	pm_runtime_mark_last_busy(dev->dev);
1046	pm_runtime_put_autosuspend(dev->dev);
1047}
1048
1049static const struct drm_ioctl_desc
1050nouveau_ioctls[] = {
1051	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
1052	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1053	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
1054	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
1055	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
1056	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
1057	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
1058	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
1059	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
1060	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
1061	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
1062	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
1063	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
1064	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
1065};
1066
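/*
 * Ioctl entry point: keep the device awake for the duration of the call,
 * routing DRM_NOUVEAU_NVIF to the usif handler and everything else to the
 * core drm_ioctl() dispatcher.
 */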
1067long
1068nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1069{
1070	struct drm_file *filp = file->private_data;
1071	struct drm_device *dev = filp->minor->dev;
1072	long ret;
1073
1074	ret = pm_runtime_get_sync(dev->dev);
1075	if (ret < 0 && ret != -EACCES)
1076		return ret;
1077
1078	switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
1079	case DRM_NOUVEAU_NVIF:
1080		ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
1081		break;
1082	default:
1083		ret = drm_ioctl(file, cmd, arg);
1084		break;
1085	}
1086
1087	pm_runtime_mark_last_busy(dev->dev);
1088	pm_runtime_put_autosuspend(dev->dev);
1089	return ret;
1090}
1091
1092static const struct file_operations
1093nouveau_driver_fops = {
1094	.owner = THIS_MODULE,
1095	.open = drm_open,
1096	.release = drm_release,
1097	.unlocked_ioctl = nouveau_drm_ioctl,
1098	.mmap = nouveau_ttm_mmap,
1099	.poll = drm_poll,
1100	.read = drm_read,
1101#if defined(CONFIG_COMPAT)
1102	.compat_ioctl = nouveau_compat_ioctl,
1103#endif
1104	.llseek = noop_llseek,
1105};
1106
1107static struct drm_driver
1108driver_stub = {
1109	.driver_features =
1110		DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
1111#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
1112		| DRIVER_KMS_LEGACY_CONTEXT
1113#endif
1114		,
1115
1116	.open = nouveau_drm_open,
1117	.postclose = nouveau_drm_postclose,
1118	.lastclose = nouveau_vga_lastclose,
1119
1120#if defined(CONFIG_DEBUG_FS)
1121	.debugfs_init = nouveau_drm_debugfs_init,
1122#endif
1123
1124	.enable_vblank = nouveau_display_vblank_enable,
1125	.disable_vblank = nouveau_display_vblank_disable,
1126	.get_scanout_position = nouveau_display_scanoutpos,
1127	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
1128
1129	.ioctls = nouveau_ioctls,
1130	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
1131	.fops = &nouveau_driver_fops,
1132
1133	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1134	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1135	.gem_prime_pin = nouveau_gem_prime_pin,
1136	.gem_prime_unpin = nouveau_gem_prime_unpin,
1137	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
1138	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
1139	.gem_prime_vmap = nouveau_gem_prime_vmap,
1140	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
1141
1142	.gem_free_object_unlocked = nouveau_gem_object_del,
1143	.gem_open_object = nouveau_gem_object_open,
1144	.gem_close_object = nouveau_gem_object_close,
1145
1146	.dumb_create = nouveau_display_dumb_create,
1147	.dumb_map_offset = nouveau_display_dumb_map_offset,
1148
1149	.name = DRIVER_NAME,
1150	.desc = DRIVER_DESC,
1151#ifdef GIT_REVISION
1152	.date = GIT_REVISION,
1153#else
1154	.date = DRIVER_DATE,
1155#endif
1156	.major = DRIVER_MAJOR,
1157	.minor = DRIVER_MINOR,
1158	.patchlevel = DRIVER_PATCHLEVEL,
1159};
1160
1161static struct pci_device_id
1162nouveau_drm_pci_table[] = {
1163	{
1164		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
1165		.class = PCI_BASE_CLASS_DISPLAY << 16,
1166		.class_mask  = 0xff << 16,
1167	},
1168	{
1169		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
1170		.class = PCI_BASE_CLASS_DISPLAY << 16,
1171		.class_mask  = 0xff << 16,
1172	},
1173	{}
1174};
1175
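/* Dump the current module parameters to the kernel debug log. */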
1176static void nouveau_display_options(void)
1177{
1178	DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
1179
1180	DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
1181	DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
1182	DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
1183	DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
1184	DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
1185	DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
1186	DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
1187	DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
1188	DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
1189	DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
1190	DRM_DEBUG_DRIVER("... hdmimhz      : %d\n", nouveau_hdmimhz);
1191}
1192
1193static const struct dev_pm_ops nouveau_pm_ops = {
1194	.suspend = nouveau_pmops_suspend,
1195	.resume = nouveau_pmops_resume,
1196	.freeze = nouveau_pmops_freeze,
1197	.thaw = nouveau_pmops_thaw,
1198	.poweroff = nouveau_pmops_freeze,
1199	.restore = nouveau_pmops_resume,
1200	.runtime_suspend = nouveau_pmops_runtime_suspend,
1201	.runtime_resume = nouveau_pmops_runtime_resume,
1202	.runtime_idle = nouveau_pmops_runtime_idle,
1203};
1204
1205static struct pci_driver
1206nouveau_drm_pci_driver = {
1207	.name = "nouveau",
1208	.id_table = nouveau_drm_pci_table,
1209	.probe = nouveau_drm_probe,
1210	.remove = nouveau_drm_remove,
1211	.driver.pm = &nouveau_pm_ops,
1212};
1213
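/*
 * Probe path for Tegra: create an NVKM device on top of a platform device
 * rather than PCI, then allocate and initialise the DRM device for it.
 */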
1214struct drm_device *
1215nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
1216			       struct platform_device *pdev,
1217			       struct nvkm_device **pdevice)
1218{
1219	struct drm_device *drm;
1220	int err;
1221
1222	err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
1223				    true, true, ~0ULL, pdevice);
1224	if (err)
1225		goto err_free;
1226
1227	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
1228	if (IS_ERR(drm)) {
1229		err = PTR_ERR(drm);
1230		goto err_free;
1231	}
1232
1233	err = nouveau_drm_device_init(drm);
1234	if (err)
1235		goto err_put;
1236
1237	platform_set_drvdata(pdev, drm);
1238
1239	return drm;
1240
1241err_put:
1242	drm_dev_put(drm);
1243err_free:
1244	nvkm_device_del(pdevice);
1245
1246	return ERR_PTR(err);
1247}
1248
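/*
 * Module init: clone the stub driver for the PCI and platform cases,
 * honour the "modeset" parameter (and the kernel's nomodeset override via
 * vgacon_text_force()), then register the platform driver, the ACPI DSM
 * handler, backlight support and the PCI driver.
 */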
1249static int __init
1250nouveau_drm_init(void)
1251{
1252	driver_pci = driver_stub;
1253	driver_platform = driver_stub;
1254
1255	nouveau_display_options();
1256
1257	if (nouveau_modeset == -1) {
1258		if (vgacon_text_force())
1259			nouveau_modeset = 0;
1260	}
1261
1262	if (!nouveau_modeset)
1263		return 0;
1264
1265#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
1266	platform_driver_register(&nouveau_platform_driver);
1267#endif
1268
1269	nouveau_register_dsm_handler();
1270	nouveau_backlight_ctor();
1271
1272#ifdef CONFIG_PCI
1273	return pci_register_driver(&nouveau_drm_pci_driver);
1274#else
1275	return 0;
1276#endif
1277}
1278
1279static void __exit
1280nouveau_drm_exit(void)
1281{
1282	if (!nouveau_modeset)
1283		return;
1284
1285#ifdef CONFIG_PCI
1286	pci_unregister_driver(&nouveau_drm_pci_driver);
1287#endif
1288	nouveau_backlight_dtor();
1289	nouveau_unregister_dsm_handler();
1290
1291#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
1292	platform_driver_unregister(&nouveau_platform_driver);
1293#endif
1294	if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
1295		mmu_notifier_synchronize();
1296}
1297
1298module_init(nouveau_drm_init);
1299module_exit(nouveau_drm_exit);
1300
1301MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
1302MODULE_AUTHOR(DRIVER_AUTHOR);
1303MODULE_DESCRIPTION(DRIVER_DESC);
1304MODULE_LICENSE("GPL and additional rights");