#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

#include "nouveau_drm.h"
#include "nouveau_acpi.h"
#include "nouveau_fbcon.h"
#include "nouveau_vga.h"

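/*
 * VGA arbiter decode hook: report which legacy VGA resources the GPU
 * decodes and, going by the register writes below, enable or disable
 * VGA decoding in hardware.  The register used depends on the chipset
 * generation.
 */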
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
	struct nouveau_drm *drm = nouveau_drm(priv);
	struct nvif_object *device = &drm->device.object;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
	    drm->device.info.chipset >= 0x4c)
		nvif_wr32(device, 0x088060, state);
	else
	if (drm->device.info.chipset >= 0x40)
		nvif_wr32(device, 0x088054, state);
	else
		nvif_wr32(device, 0x001854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

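/*
 * vga_switcheroo state callback: resume the GPU and re-enable output
 * polling when it becomes the active GPU, suspend it when switched
 * away.  On Optimus/v1-DSM systems the OFF request is ignored here;
 * powering the GPU down is handled elsewhere (runtime PM / ACPI DSM).
 */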
static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pmops_resume(&pdev->dev);
		drm_kms_helper_poll_enable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		drm_kms_helper_poll_disable(dev);
		nouveau_switcheroo_optimus_dsm();
		nouveau_pmops_suspend(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	nouveau_fbcon_output_poll_changed(dev);
}

static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
	.set_gpu_state = nouveau_switcheroo_set_state,
	.reprobe = nouveau_switcheroo_reprobe,
	.can_switch = nouveau_switcheroo_can_switch,
};

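/*
 * Register the GPU with the VGA arbiter and with vga_switcheroo.
 * Runtime power management is used when forced on via the
 * nouveau_runtime_pm module option, or in its default (-1) setting on
 * Optimus/v1-DSM systems.
 */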
void
nouveau_vga_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = false;

	/* only relevant for PCI devices */
	if (!dev->pdev)
		return;

	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);

	if (nouveau_runtime_pm == 1)
		runtime = true;
	if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
		runtime = true;
	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);

	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}

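/* Undo the registrations made in nouveau_vga_init(). */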
void
nouveau_vga_fini(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = false;

	if (nouveau_runtime_pm == 1)
		runtime = true;
	if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
		runtime = true;

	vga_switcheroo_unregister_client(dev->pdev);
	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);
}


void
nouveau_vga_lastclose(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}
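
/*
 * A later revision of the same file (nouveau_vga.c) follows, built
 * against newer kernel interfaces: pci_dev-based vgaarb callbacks,
 * dev_is_pci()/to_pci_dev(), nouveau_pmops_runtime(), and a check that
 * keeps Thunderbolt eGPUs away from vga_switcheroo.
 */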
// SPDX-License-Identifier: MIT
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_fb_helper.h>

#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#include "nouveau_vga.h"

static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
	struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
	struct nvif_object *device = &drm->client.device.object;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
	    drm->client.device.info.chipset >= 0x4c)
		nvif_wr32(device, 0x088060, state);
	else
	if (drm->client.device.info.chipset >= 0x40)
		nvif_wr32(device, 0x088054, state);
	else
		nvif_wr32(device, 0x001854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_err("VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pmops_resume(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_switcheroo_optimus_dsm();
		nouveau_pmops_suspend(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	drm_fb_helper_output_poll_changed(dev);
}

static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
	.set_gpu_state = nouveau_switcheroo_set_state,
	.reprobe = nouveau_switcheroo_reprobe,
	.can_switch = nouveau_switcheroo_can_switch,
};

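/*
 * Register with the VGA arbiter and, unless the GPU is an externally
 * attached Thunderbolt device, with vga_switcheroo.  The runtime PM
 * decision is delegated to nouveau_pmops_runtime().
 */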
void
nouveau_vga_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();
	struct pci_dev *pdev;

	/* only relevant for PCI devices */
	if (!dev_is_pci(dev->dev))
		return;
	pdev = to_pci_dev(dev->dev);

	vga_client_register(pdev, nouveau_vga_set_decode);

	/* don't register Thunderbolt eGPU with vga_switcheroo */
	if (pci_is_thunderbolt_attached(pdev))
		return;

	vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);

	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}

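/* Undo the registrations made in nouveau_vga_init(). */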
void
nouveau_vga_fini(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();
	struct pci_dev *pdev;

	/* only relevant for PCI devices */
	if (!dev_is_pci(dev->dev))
		return;
	pdev = to_pci_dev(dev->dev);

	vga_client_unregister(pdev);

	if (pci_is_thunderbolt_attached(pdev))
		return;

	vga_switcheroo_unregister_client(pdev);
	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}


void
nouveau_vga_lastclose(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}