pci-acpi.c (Linux v3.5.6)
  1/*
  2 * File:	pci-acpi.c
  3 * Purpose:	Provide PCI support in ACPI
  4 *
  5 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
  6 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
  7 * Copyright (C) 2004 Intel Corp.
  8 */
  9
 10#include <linux/delay.h>
 11#include <linux/init.h>
 12#include <linux/pci.h>
 13#include <linux/module.h>
 14#include <linux/pci-aspm.h>
 15#include <acpi/acpi.h>
 16#include <acpi/acpi_bus.h>
 17
 18#include <linux/pci-acpi.h>
 19#include <linux/pm_runtime.h>
 20#include "pci.h"
 21
 22static DEFINE_MUTEX(pci_acpi_pm_notify_mtx);
 23
 24/**
 25 * pci_acpi_wake_bus - Wake-up notification handler for root buses.
 26 * @handle: ACPI handle of a device the notification is for.
 27 * @event: Type of the signaled event.
 28 * @context: PCI root bus to wake up devices on.
 29 */
 30static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context)
 31{
 32	struct pci_bus *pci_bus = context;
 33
 34	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus)
 35		pci_pme_wakeup_bus(pci_bus);
 36}
 37
 38/**
 39 * pci_acpi_wake_dev - Wake-up notification handler for PCI devices.
 40 * @handle: ACPI handle of a device the notification is for.
 41 * @event: Type of the signaled event.
 42 * @context: PCI device object to wake up.
 43 */
 44static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
 45{
 46	struct pci_dev *pci_dev = context;
 47
 48	if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
 49		return;
 50
 51	if (!pci_dev->pm_cap || !pci_dev->pme_support
 52	     || pci_check_pme_status(pci_dev)) {
 53		if (pci_dev->pme_poll)
 54			pci_dev->pme_poll = false;
 55
 56		pci_wakeup_event(pci_dev);
 57		pm_runtime_resume(&pci_dev->dev);
 58	}
 59
 60	if (pci_dev->subordinate)
 61		pci_pme_wakeup_bus(pci_dev->subordinate);
 62}
 63
 64/**
 65 * add_pm_notifier - Register PM notifier for given ACPI device.
 66 * @dev: ACPI device to add the notifier for.
 67 * @context: PCI device or bus to check for PME status if an event is signaled.
 68 *
 69 * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of
 70 * PM wake-up events.  For example, wake-up events may be generated for bridges
 71 * if one of the devices below the bridge is signaling PME, even if the bridge
 72 * itself doesn't have a wake-up GPE associated with it.
 73 */
 74static acpi_status add_pm_notifier(struct acpi_device *dev,
 75				   acpi_notify_handler handler,
 76				   void *context)
 77{
 78	acpi_status status = AE_ALREADY_EXISTS;
 79
 80	mutex_lock(&pci_acpi_pm_notify_mtx);
 81
 82	if (dev->wakeup.flags.notifier_present)
 83		goto out;
 84
 85	status = acpi_install_notify_handler(dev->handle,
 86					     ACPI_SYSTEM_NOTIFY,
 87					     handler, context);
 88	if (ACPI_FAILURE(status))
 89		goto out;
 90
 91	dev->wakeup.flags.notifier_present = true;
 92
 93 out:
 94	mutex_unlock(&pci_acpi_pm_notify_mtx);
 95	return status;
 96}
 97
 98/**
 99 * remove_pm_notifier - Unregister PM notifier from given ACPI device.
100 * @dev: ACPI device to remove the notifier from.
101 */
102static acpi_status remove_pm_notifier(struct acpi_device *dev,
103				      acpi_notify_handler handler)
104{
105	acpi_status status = AE_BAD_PARAMETER;
106
107	mutex_lock(&pci_acpi_pm_notify_mtx);
108
109	if (!dev->wakeup.flags.notifier_present)
110		goto out;
111
112	status = acpi_remove_notify_handler(dev->handle,
113					    ACPI_SYSTEM_NOTIFY,
114					    handler);
115	if (ACPI_FAILURE(status))
116		goto out;
117
118	dev->wakeup.flags.notifier_present = false;
119
120 out:
121	mutex_unlock(&pci_acpi_pm_notify_mtx);
122	return status;
123}
124
125/**
126 * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
127 * @dev: ACPI device to add the notifier for.
128 * @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
129 */
130acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
131					 struct pci_bus *pci_bus)
132{
133	return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
134}
135
136/**
137 * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier.
138 * @dev: ACPI device to remove the notifier from.
139 */
140acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
141{
142	return remove_pm_notifier(dev, pci_acpi_wake_bus);
143}
144
145/**
146 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
147 * @dev: ACPI device to add the notifier for.
148 * @pci_dev: PCI device to check for the PME status if an event is signaled.
149 */
150acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
151				     struct pci_dev *pci_dev)
152{
153	return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
154}
155
156/**
157 * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier.
158 * @dev: ACPI device to remove the notifier from.
159 */
160acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
161{
162	return remove_pm_notifier(dev, pci_acpi_wake_dev);
163}
164
165/*
166 * _SxD returns the D-state with the highest power
167 * (lowest D-state number) supported in the S-state "x".
168 *
169 * If the device does not have a _PRW
170 * (Power Resources for Wake) supporting system wakeup from "x"
171 * then the OS is free to choose a lower power (higher number
172 * D-state) than the return value from _SxD.
173 *
174 * But if _PRW is enabled at S-state "x", the OS
175 * must not choose a power lower than _SxD --
176 * unless the device has an _SxW method specifying
177 * the lowest power (highest D-state number) the device
178 * may enter while still able to wake the system.
179 *
180 * i.e. depending on global OS policy:
181 *
182 * if (_PRW at S-state x)
183 *	choose from highest power _SxD to lowest power _SxW
184 * else // no _PRW at S-state x
185 * 	choose highest power _SxD or any lower power
186 */
187
188static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
189{
190	int acpi_state;
191
192	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL);
193	if (acpi_state < 0)
194		return PCI_POWER_ERROR;
195
196	switch (acpi_state) {
197	case ACPI_STATE_D0:
198		return PCI_D0;
199	case ACPI_STATE_D1:
200		return PCI_D1;
201	case ACPI_STATE_D2:
202		return PCI_D2;
203	case ACPI_STATE_D3_HOT:
204		return PCI_D3hot;
205	case ACPI_STATE_D3_COLD:
206		return PCI_D3cold;
207	}
208	return PCI_POWER_ERROR;
209}
210
211static bool acpi_pci_power_manageable(struct pci_dev *dev)
212{
213	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
214
215	return handle ? acpi_bus_power_manageable(handle) : false;
216}
217
218static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
219{
220	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
221	acpi_handle tmp;
222	static const u8 state_conv[] = {
223		[PCI_D0] = ACPI_STATE_D0,
224		[PCI_D1] = ACPI_STATE_D1,
225		[PCI_D2] = ACPI_STATE_D2,
226		[PCI_D3hot] = ACPI_STATE_D3,
227		[PCI_D3cold] = ACPI_STATE_D3
228	};
229	int error = -EINVAL;
230
231	/* If the ACPI device has _EJ0, ignore the device */
232	if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
233		return -ENODEV;
234
235	switch (state) {
236	case PCI_D0:
237	case PCI_D1:
238	case PCI_D2:
239	case PCI_D3hot:
240	case PCI_D3cold:
241		error = acpi_bus_set_power(handle, state_conv[state]);
242	}
243
244	if (!error)
245		dev_printk(KERN_INFO, &dev->dev,
246				"power state changed by ACPI to D%d\n", state);
247
248	return error;
249}
250
251static bool acpi_pci_can_wakeup(struct pci_dev *dev)
252{
253	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
254
255	return handle ? acpi_bus_can_wakeup(handle) : false;
256}
257
258static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
259{
260	while (bus->parent) {
261		if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
262			return;
263		bus = bus->parent;
264	}
265
266	/* We have reached the root bus. */
267	if (bus->bridge)
268		acpi_pm_device_sleep_wake(bus->bridge, enable);
269}
270
271static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
272{
273	if (acpi_pci_can_wakeup(dev))
274		return acpi_pm_device_sleep_wake(&dev->dev, enable);
275
276	acpi_pci_propagate_wakeup_enable(dev->bus, enable);
277	return 0;
278}
279
280static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
281{
282	while (bus->parent) {
283		struct pci_dev *bridge = bus->self;
284
285		if (bridge->pme_interrupt)
286			return;
287		if (!acpi_pm_device_run_wake(&bridge->dev, enable))
288			return;
289		bus = bus->parent;
290	}
291
292	/* We have reached the root bus. */
293	if (bus->bridge)
294		acpi_pm_device_run_wake(bus->bridge, enable);
295}
296
297static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
298{
299	if (dev->pme_interrupt)
300		return 0;
301
302	if (!acpi_pm_device_run_wake(&dev->dev, enable))
303		return 0;
304
305	acpi_pci_propagate_run_wake(dev->bus, enable);
306	return 0;
307}
308
309static struct pci_platform_pm_ops acpi_pci_platform_pm = {
310	.is_manageable = acpi_pci_power_manageable,
311	.set_state = acpi_pci_set_power_state,
312	.choose_state = acpi_pci_choose_state,
313	.can_wakeup = acpi_pci_can_wakeup,
314	.sleep_wake = acpi_pci_sleep_wake,
315	.run_wake = acpi_pci_run_wake,
316};
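/*
 * Context, as a hedged sketch rather than code from this file: acpi_pci_init()
 * below installs this table with pci_set_platform_pm(&acpi_pci_platform_pm),
 * and the platform_pci_*() wrappers in drivers/pci/pci.c then dispatch through
 * these callbacks; e.g. pci_choose_state(pdev, PMSG_SUSPEND) ends up calling
 * acpi_pci_choose_state() via .choose_state.
 */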
317
318/* ACPI bus type */
319static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
320{
321	struct pci_dev * pci_dev;
322	u64	addr;
323
324	pci_dev = to_pci_dev(dev);
325	/* Please refer to the ACPI spec for the syntax of _ADR */
326	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
327	*handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
328	if (!*handle)
329		return -ENODEV;
330	return 0;
331}
332
333static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
334{
335	int num;
336	unsigned int seg, bus;
337
338	/*
339	 * The string should be the same as the root bridge's name;
340	 * see pci_scan_bus_parented().
341	 */
342	num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus);
343	if (num != 2)
344		return -ENODEV;
345	*handle = acpi_get_pci_rootbridge_handle(seg, bus);
346	if (!*handle)
347		return -ENODEV;
348	return 0;
349}
350
351static struct acpi_bus_type acpi_pci_bus = {
352	.bus = &pci_bus_type,
353	.find_device = acpi_pci_find_device,
354	.find_bridge = acpi_pci_find_root_bridge,
355};
356
357static int __init acpi_pci_init(void)
358{
359	int ret;
360
361	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
362		printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n");
363		pci_no_msi();
364	}
365
366	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
367		printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
368		pcie_no_aspm();
369	}
370
371	ret = register_acpi_bus_type(&acpi_pci_bus);
372	if (ret)
373		return 0;
374	pci_set_platform_pm(&acpi_pci_platform_pm);
375	return 0;
376}
377arch_initcall(acpi_pci_init);
pci-acpi.c (Linux v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * PCI support in ACPI
  4 *
  5 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
  6 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
  7 * Copyright (C) 2004 Intel Corp.
  8 */
  9
 10#include <linux/delay.h>
 11#include <linux/init.h>
 12#include <linux/irqdomain.h>
 13#include <linux/pci.h>
 14#include <linux/msi.h>
 15#include <linux/pci_hotplug.h>
 16#include <linux/module.h>
 17#include <linux/pci-aspm.h>
 18#include <linux/pci-acpi.h>
 19#include <linux/pm_runtime.h>
 20#include <linux/pm_qos.h>
 21#include "pci.h"
 22
 23/*
 24 * The GUID is defined in the PCI Firmware Specification available here:
 25 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 26 */
 27const guid_t pci_acpi_dsm_guid =
 28	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
 29		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
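/*
 * A brief usage sketch, mirroring acpi_pci_add_bus() and
 * pci_acpi_optimize_delay() later in this file: callers evaluate a _DSM
 * function under this GUID at revision 3 and free the returned object:
 *
 *	union acpi_object *obj;
 *
 *	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
 *				RESET_DELAY_DSM, NULL);
 *	if (obj) {
 *		... inspect obj->type / obj->integer.value ...
 *		ACPI_FREE(obj);
 *	}
 */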
 30
 31#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
 32static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
 33{
 34	struct device *dev = &adev->dev;
 35	struct resource_entry *entry;
 36	struct list_head list;
 37	unsigned long flags;
 38	int ret;
 39
 40	INIT_LIST_HEAD(&list);
 41	flags = IORESOURCE_MEM;
 42	ret = acpi_dev_get_resources(adev, &list,
 43				     acpi_dev_filter_resource_type_cb,
 44				     (void *) flags);
 45	if (ret < 0) {
 46		dev_err(dev, "failed to parse _CRS method, error code %d\n",
 47			ret);
 48		return ret;
 49	}
 50
 51	if (ret == 0) {
 52		dev_err(dev, "no IO and memory resources present in _CRS\n");
 53		return -EINVAL;
 54	}
 55
 56	entry = list_first_entry(&list, struct resource_entry, node);
 57	*res = *entry->res;
 58	acpi_dev_free_resource_list(&list);
 59	return 0;
 60}
 61
 62static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
 63				 void **retval)
 64{
 65	u16 *segment = context;
 66	unsigned long long uid;
 67	acpi_status status;
 68
 69	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
 70	if (ACPI_FAILURE(status) || uid != *segment)
 71		return AE_CTRL_DEPTH;
 72
 73	*(acpi_handle *)retval = handle;
 74	return AE_CTRL_TERMINATE;
 75}
 76
 77int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
 78			  struct resource *res)
 79{
 80	struct acpi_device *adev;
 81	acpi_status status;
 82	acpi_handle handle;
 83	int ret;
 84
 85	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
 86	if (ACPI_FAILURE(status)) {
 87		dev_err(dev, "can't find _HID %s device to locate resources\n",
 88			hid);
 89		return -ENODEV;
 90	}
 91
 92	ret = acpi_bus_get_device(handle, &adev);
 93	if (ret)
 94		return ret;
 95
 96	ret = acpi_get_rc_addr(adev, res);
 97	if (ret) {
 98		dev_err(dev, "can't get resource from %s\n",
 99			dev_name(&adev->dev));
100		return ret;
101	}
102
103	return 0;
104}
105#endif
106
107phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
108{
109	acpi_status status = AE_NOT_EXIST;
110	unsigned long long mcfg_addr;
111
112	if (handle)
113		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
114					       NULL, &mcfg_addr);
115	if (ACPI_FAILURE(status))
116		return 0;
117
118	return (phys_addr_t)mcfg_addr;
119}
120
121static acpi_status decode_type0_hpx_record(union acpi_object *record,
122					   struct hotplug_params *hpx)
123{
124	int i;
125	union acpi_object *fields = record->package.elements;
126	u32 revision = fields[1].integer.value;
127
128	switch (revision) {
129	case 1:
130		if (record->package.count != 6)
131			return AE_ERROR;
132		for (i = 2; i < 6; i++)
133			if (fields[i].type != ACPI_TYPE_INTEGER)
134				return AE_ERROR;
135		hpx->t0 = &hpx->type0_data;
136		hpx->t0->revision        = revision;
137		hpx->t0->cache_line_size = fields[2].integer.value;
138		hpx->t0->latency_timer   = fields[3].integer.value;
139		hpx->t0->enable_serr     = fields[4].integer.value;
140		hpx->t0->enable_perr     = fields[5].integer.value;
141		break;
142	default:
143		printk(KERN_WARNING
144		       "%s: Type 0 Revision %d record not supported\n",
145		       __func__, revision);
146		return AE_ERROR;
147	}
148	return AE_OK;
149}
150
151static acpi_status decode_type1_hpx_record(union acpi_object *record,
152					   struct hotplug_params *hpx)
153{
154	int i;
155	union acpi_object *fields = record->package.elements;
156	u32 revision = fields[1].integer.value;
157
158	switch (revision) {
159	case 1:
160		if (record->package.count != 5)
161			return AE_ERROR;
162		for (i = 2; i < 5; i++)
163			if (fields[i].type != ACPI_TYPE_INTEGER)
164				return AE_ERROR;
165		hpx->t1 = &hpx->type1_data;
166		hpx->t1->revision      = revision;
167		hpx->t1->max_mem_read  = fields[2].integer.value;
168		hpx->t1->avg_max_split = fields[3].integer.value;
169		hpx->t1->tot_max_split = fields[4].integer.value;
170		break;
171	default:
172		printk(KERN_WARNING
173		       "%s: Type 1 Revision %d record not supported\n",
174		       __func__, revision);
175		return AE_ERROR;
176	}
177	return AE_OK;
178}
179
180static acpi_status decode_type2_hpx_record(union acpi_object *record,
181					   struct hotplug_params *hpx)
182{
183	int i;
184	union acpi_object *fields = record->package.elements;
185	u32 revision = fields[1].integer.value;
186
187	switch (revision) {
188	case 1:
189		if (record->package.count != 18)
190			return AE_ERROR;
191		for (i = 2; i < 18; i++)
192			if (fields[i].type != ACPI_TYPE_INTEGER)
193				return AE_ERROR;
194		hpx->t2 = &hpx->type2_data;
195		hpx->t2->revision      = revision;
196		hpx->t2->unc_err_mask_and      = fields[2].integer.value;
197		hpx->t2->unc_err_mask_or       = fields[3].integer.value;
198		hpx->t2->unc_err_sever_and     = fields[4].integer.value;
199		hpx->t2->unc_err_sever_or      = fields[5].integer.value;
200		hpx->t2->cor_err_mask_and      = fields[6].integer.value;
201		hpx->t2->cor_err_mask_or       = fields[7].integer.value;
202		hpx->t2->adv_err_cap_and       = fields[8].integer.value;
203		hpx->t2->adv_err_cap_or        = fields[9].integer.value;
204		hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
205		hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
206		hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
207		hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
208		hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
209		hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
210		hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
211		hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
212		break;
213	default:
214		printk(KERN_WARNING
215		       "%s: Type 2 Revision %d record not supported\n",
216		       __func__, revision);
217		return AE_ERROR;
218	}
219	return AE_OK;
220}
221
222static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
223{
224	acpi_status status;
225	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
226	union acpi_object *package, *record, *fields;
227	u32 type;
228	int i;
229
230	/* Clear the return buffer with zeros */
231	memset(hpx, 0, sizeof(struct hotplug_params));
232
233	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
234	if (ACPI_FAILURE(status))
235		return status;
236
237	package = (union acpi_object *)buffer.pointer;
238	if (package->type != ACPI_TYPE_PACKAGE) {
239		status = AE_ERROR;
240		goto exit;
241	}
242
243	for (i = 0; i < package->package.count; i++) {
244		record = &package->package.elements[i];
245		if (record->type != ACPI_TYPE_PACKAGE) {
246			status = AE_ERROR;
247			goto exit;
248		}
249
250		fields = record->package.elements;
251		if (fields[0].type != ACPI_TYPE_INTEGER ||
252		    fields[1].type != ACPI_TYPE_INTEGER) {
253			status = AE_ERROR;
254			goto exit;
255		}
256
257		type = fields[0].integer.value;
258		switch (type) {
259		case 0:
260			status = decode_type0_hpx_record(record, hpx);
261			if (ACPI_FAILURE(status))
262				goto exit;
263			break;
264		case 1:
265			status = decode_type1_hpx_record(record, hpx);
266			if (ACPI_FAILURE(status))
267				goto exit;
268			break;
269		case 2:
270			status = decode_type2_hpx_record(record, hpx);
271			if (ACPI_FAILURE(status))
272				goto exit;
273			break;
274		default:
275			printk(KERN_ERR "%s: Type %d record not supported\n",
276			       __func__, type);
277			status = AE_ERROR;
278			goto exit;
279		}
280	}
281 exit:
282	kfree(buffer.pointer);
283	return status;
284}
285
286static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
287{
288	acpi_status status;
289	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
290	union acpi_object *package, *fields;
291	int i;
292
293	memset(hpp, 0, sizeof(struct hotplug_params));
294
295	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
296	if (ACPI_FAILURE(status))
297		return status;
298
299	package = (union acpi_object *) buffer.pointer;
300	if (package->type != ACPI_TYPE_PACKAGE ||
301	    package->package.count != 4) {
302		status = AE_ERROR;
303		goto exit;
304	}
305
306	fields = package->package.elements;
307	for (i = 0; i < 4; i++) {
308		if (fields[i].type != ACPI_TYPE_INTEGER) {
309			status = AE_ERROR;
310			goto exit;
311		}
312	}
313
314	hpp->t0 = &hpp->type0_data;
315	hpp->t0->revision        = 1;
316	hpp->t0->cache_line_size = fields[0].integer.value;
317	hpp->t0->latency_timer   = fields[1].integer.value;
318	hpp->t0->enable_serr     = fields[2].integer.value;
319	hpp->t0->enable_perr     = fields[3].integer.value;
320
321exit:
322	kfree(buffer.pointer);
323	return status;
324}
325
326/* pci_get_hp_params
327 *
328 * @dev - the pci_dev for which we want parameters
329 * @hpp - allocated by the caller
330 */
331int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
332{
333	acpi_status status;
334	acpi_handle handle, phandle;
335	struct pci_bus *pbus;
336
337	if (acpi_pci_disabled)
338		return -ENODEV;
339
340	handle = NULL;
341	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
342		handle = acpi_pci_get_bridge_handle(pbus);
343		if (handle)
344			break;
345	}
346
347	/*
348	 * _HPP settings apply to all child buses, until another _HPP is
349	 * encountered. If we don't find an _HPP for the input pci dev,
350	 * look for it in the parent device scope since that would apply to
351	 * this pci dev.
352	 */
353	while (handle) {
354		status = acpi_run_hpx(handle, hpp);
355		if (ACPI_SUCCESS(status))
356			return 0;
357		status = acpi_run_hpp(handle, hpp);
358		if (ACPI_SUCCESS(status))
359			return 0;
360		if (acpi_is_root_bridge(handle))
361			break;
362		status = acpi_get_parent(handle, &phandle);
363		if (ACPI_FAILURE(status))
364			break;
365		handle = phandle;
366	}
367	return -ENODEV;
368}
369EXPORT_SYMBOL_GPL(pci_get_hp_params);
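/*
 * A hedged sketch of a caller (not the exact in-tree consumer): an enumeration
 * or hotplug path can fetch the parameters and, when a Type 0 record was
 * found, program the standard header fields, roughly:
 *
 *	struct hotplug_params hpp;
 *
 *	if (!pci_get_hp_params(dev, &hpp) && hpp.t0) {
 *		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
 *				      hpp.t0->cache_line_size);
 *		pci_write_config_byte(dev, PCI_LATENCY_TIMER,
 *				      hpp.t0->latency_timer);
 *	}
 */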
370
371/**
372 * pciehp_is_native - Check whether a hotplug port is handled by the OS
373 * @pdev: Hotplug port to check
374 *
375 * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
376 * and return the value of the "PCI Express Native Hot Plug control" bit.
377 * On failure to obtain the _OSC Control Field return %false.
378 */
379bool pciehp_is_native(struct pci_dev *pdev)
380{
381	struct acpi_pci_root *root;
382	acpi_handle handle;
383
384	handle = acpi_find_root_bridge_handle(pdev);
385	if (!handle)
386		return false;
387
388	root = acpi_pci_find_root(handle);
389	if (!root)
390		return false;
391
392	return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
393}
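/*
 * A hedged usage sketch: code reacting to hotplug events can use this to check
 * whether the OS, rather than firmware, owns hotplug on the port:
 *
 *	if (!pciehp_is_native(pdev))
 *		return -ENODEV;		(firmware retained hotplug control)
 */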
394
395/**
396 * pci_acpi_wake_bus - Root bus wakeup notification fork function.
397 * @context: Device wakeup context.
398 */
399static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
400{
401	struct acpi_device *adev;
402	struct acpi_pci_root *root;
403
404	adev = container_of(context, struct acpi_device, wakeup.context);
405	root = acpi_driver_data(adev);
406	pci_pme_wakeup_bus(root->bus);
407}
408
409/**
410 * pci_acpi_wake_dev - PCI device wakeup notification work function.
411 * @context: Device wakeup context.
412 */
413static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
414{
415	struct pci_dev *pci_dev;
416
417	pci_dev = to_pci_dev(context->dev);
418
419	if (pci_dev->pme_poll)
420		pci_dev->pme_poll = false;
421
422	if (pci_dev->current_state == PCI_D3cold) {
423		pci_wakeup_event(pci_dev);
424		pm_request_resume(&pci_dev->dev);
425		return;
426	}
427
428	/* Clear PME Status if set. */
429	if (pci_dev->pme_support)
430		pci_check_pme_status(pci_dev);
431
432	pci_wakeup_event(pci_dev);
433	pm_request_resume(&pci_dev->dev);
434
435	pci_pme_wakeup_bus(pci_dev->subordinate);
436}
437
438/**
439 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
440 * @dev: PCI root bridge ACPI device.
441 */
442acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
443{
444	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
445}
446
447/**
448 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
449 * @dev: ACPI device to add the notifier for.
450 * @pci_dev: PCI device to check for the PME status if an event is signaled.
451 */
452acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
453				     struct pci_dev *pci_dev)
454{
455	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
456}
457
458/*
459 * _SxD returns the D-state with the highest power
460 * (lowest D-state number) supported in the S-state "x".
461 *
462 * If the device does not have a _PRW
463 * (Power Resources for Wake) supporting system wakeup from "x"
464 * then the OS is free to choose a lower power (higher number
465 * D-state) than the return value from _SxD.
466 *
467 * But if _PRW is enabled at S-state "x", the OS
468 * must not choose a power lower than _SxD --
469 * unless the device has an _SxW method specifying
470 * the lowest power (highest D-state number) the device
471 * may enter while still able to wake the system.
472 *
473 * i.e. depending on global OS policy:
474 *
475 * if (_PRW at S-state x)
476 *	choose from highest power _SxD to lowest power _SxW
477 * else // no _PRW at S-state x
478 *	choose highest power _SxD or any lower power
479 */
480
481static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
482{
483	int acpi_state, d_max;
484
485	if (pdev->no_d3cold)
486		d_max = ACPI_STATE_D3_HOT;
487	else
488		d_max = ACPI_STATE_D3_COLD;
489	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
490	if (acpi_state < 0)
491		return PCI_POWER_ERROR;
492
493	switch (acpi_state) {
494	case ACPI_STATE_D0:
495		return PCI_D0;
496	case ACPI_STATE_D1:
497		return PCI_D1;
498	case ACPI_STATE_D2:
499		return PCI_D2;
500	case ACPI_STATE_D3_HOT:
501		return PCI_D3hot;
502	case ACPI_STATE_D3_COLD:
503		return PCI_D3cold;
504	}
505	return PCI_POWER_ERROR;
506}
507
508static bool acpi_pci_power_manageable(struct pci_dev *dev)
509{
510	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
511	return adev ? acpi_device_power_manageable(adev) : false;
512}
513
514static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
515{
516	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
517	static const u8 state_conv[] = {
518		[PCI_D0] = ACPI_STATE_D0,
519		[PCI_D1] = ACPI_STATE_D1,
520		[PCI_D2] = ACPI_STATE_D2,
521		[PCI_D3hot] = ACPI_STATE_D3_HOT,
522		[PCI_D3cold] = ACPI_STATE_D3_COLD,
523	};
524	int error = -EINVAL;
525
526	/* If the ACPI device has _EJ0, ignore the device */
527	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
528		return -ENODEV;
529
530	switch (state) {
531	case PCI_D3cold:
532		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
533				PM_QOS_FLAGS_ALL) {
534			error = -EBUSY;
535			break;
536		}
537	case PCI_D0:
538	case PCI_D1:
539	case PCI_D2:
540	case PCI_D3hot:
541		error = acpi_device_set_power(adev, state_conv[state]);
542	}
543
544	if (!error)
545		pci_dbg(dev, "power state changed by ACPI to %s\n",
546			 acpi_power_state_string(state_conv[state]));
547
548	return error;
549}
550
551static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
552{
553	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
554	static const pci_power_t state_conv[] = {
555		[ACPI_STATE_D0]      = PCI_D0,
556		[ACPI_STATE_D1]      = PCI_D1,
557		[ACPI_STATE_D2]      = PCI_D2,
558		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
559		[ACPI_STATE_D3_COLD] = PCI_D3cold,
560	};
561	int state;
562
563	if (!adev || !acpi_device_power_manageable(adev))
564		return PCI_UNKNOWN;
565
566	if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
567		return PCI_UNKNOWN;
568
569	return state_conv[state];
570}
571
572static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
573{
574	while (bus->parent) {
575		if (acpi_pm_device_can_wakeup(&bus->self->dev))
576			return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
577
578		bus = bus->parent;
579	}
580
581	/* We have reached the root bus. */
582	if (bus->bridge) {
583		if (acpi_pm_device_can_wakeup(bus->bridge))
584			return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
585	}
586	return 0;
587}
588
589static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
590{
591	if (acpi_pm_device_can_wakeup(&dev->dev))
592		return acpi_pm_set_device_wakeup(&dev->dev, enable);
593
594	return acpi_pci_propagate_wakeup(dev->bus, enable);
595}
596
597static bool acpi_pci_need_resume(struct pci_dev *dev)
598{
599	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
600
601	if (!adev || !acpi_device_power_manageable(adev))
602		return false;
603
604	if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
605		return true;
606
607	if (acpi_target_system_state() == ACPI_STATE_S0)
608		return false;
609
610	return !!adev->power.flags.dsw_present;
611}
612
613static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
614	.is_manageable = acpi_pci_power_manageable,
615	.set_state = acpi_pci_set_power_state,
616	.get_state = acpi_pci_get_power_state,
617	.choose_state = acpi_pci_choose_state,
618	.set_wakeup = acpi_pci_wakeup,
619	.need_resume = acpi_pci_need_resume,
620};
621
622void acpi_pci_add_bus(struct pci_bus *bus)
623{
624	union acpi_object *obj;
625	struct pci_host_bridge *bridge;
626
627	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
628		return;
629
630	acpi_pci_slot_enumerate(bus);
631	acpiphp_enumerate_slots(bus);
632
633	/*
634	 * For a host bridge, check its _DSM for function 8 and if
635	 * that is available, mark it in pci_host_bridge.
636	 */
637	if (!pci_is_root_bus(bus))
638		return;
639
640	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
641				RESET_DELAY_DSM, NULL);
642	if (!obj)
643		return;
644
645	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
646		bridge = pci_find_host_bridge(bus);
647		bridge->ignore_reset_delay = 1;
648	}
649	ACPI_FREE(obj);
650}
651
652void acpi_pci_remove_bus(struct pci_bus *bus)
653{
654	if (acpi_pci_disabled || !bus->bridge)
655		return;
656
657	acpiphp_remove_slots(bus);
658	acpi_pci_slot_remove(bus);
659}
660
661/* ACPI bus type */
662static struct acpi_device *acpi_pci_find_companion(struct device *dev)
663{
664	struct pci_dev *pci_dev = to_pci_dev(dev);
665	bool check_children;
666	u64 addr;
667
668	check_children = pci_is_bridge(pci_dev);
669	/* Please refer to the ACPI spec for the syntax of _ADR */
670	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
671	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
672				      check_children);
673}
674
675/**
676 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
677 * @pdev: the PCI device whose delay is to be updated
678 * @handle: ACPI handle of this device
679 *
680 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
681 * control method of either the device itself or the PCI host bridge.
682 *
683 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
684 * host bridge.  If it returns one, the OS may assume that all devices in
685 * the hierarchy have already completed power-on reset delays.
686 *
687 * Function 9, "Device Readiness Durations," applies only to the object
688 * where it is located.  It returns delay durations required after various
689 * events if the device requires less time than the spec requires.  Delays
690 * from this function take precedence over the Reset Delay function.
691 *
692 * These _DSM functions are defined by the draft ECN of January 28, 2014,
693 * titled "ACPI additions for FW latency optimizations."
694 */
695static void pci_acpi_optimize_delay(struct pci_dev *pdev,
696				    acpi_handle handle)
697{
698	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
699	int value;
700	union acpi_object *obj, *elements;
701
702	if (bridge->ignore_reset_delay)
703		pdev->d3cold_delay = 0;
704
705	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
706				FUNCTION_DELAY_DSM, NULL);
707	if (!obj)
708		return;
709
710	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
711		elements = obj->package.elements;
712		if (elements[0].type == ACPI_TYPE_INTEGER) {
713			value = (int)elements[0].integer.value / 1000;
714			if (value < PCI_PM_D3COLD_WAIT)
715				pdev->d3cold_delay = value;
716		}
717		if (elements[3].type == ACPI_TYPE_INTEGER) {
718			value = (int)elements[3].integer.value / 1000;
719			if (value < PCI_PM_D3_WAIT)
720				pdev->d3_delay = value;
721		}
722	}
723	ACPI_FREE(obj);
724}
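/*
 * Worked example with illustrative numbers only: if Function 9 reports
 * 10000 us in element 0, the code above computes 10000 / 1000 = 10 ms, which
 * is below PCI_PM_D3COLD_WAIT (100 ms), so pdev->d3cold_delay becomes 10; the
 * same logic applies to element 3 and pdev->d3_delay, compared against
 * PCI_PM_D3_WAIT.
 */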
725
726static void pci_acpi_setup(struct device *dev)
727{
728	struct pci_dev *pci_dev = to_pci_dev(dev);
729	struct acpi_device *adev = ACPI_COMPANION(dev);
730
731	if (!adev)
732		return;
733
734	pci_acpi_optimize_delay(pci_dev, adev->handle);
735
736	pci_acpi_add_pm_notifier(adev, pci_dev);
737	if (!adev->wakeup.flags.valid)
738		return;
739
740	device_set_wakeup_capable(dev, true);
741	acpi_pci_wakeup(pci_dev, false);
742}
743
744static void pci_acpi_cleanup(struct device *dev)
745{
746	struct acpi_device *adev = ACPI_COMPANION(dev);
747
748	if (!adev)
749		return;
750
751	pci_acpi_remove_pm_notifier(adev);
752	if (adev->wakeup.flags.valid)
753		device_set_wakeup_capable(dev, false);
754}
755
756static bool pci_acpi_bus_match(struct device *dev)
757{
758	return dev_is_pci(dev);
759}
760
761static struct acpi_bus_type acpi_pci_bus = {
762	.name = "PCI",
763	.match = pci_acpi_bus_match,
764	.find_companion = acpi_pci_find_companion,
765	.setup = pci_acpi_setup,
766	.cleanup = pci_acpi_cleanup,
767};
768
769
770static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);
771
772/**
773 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
774 * @fn:       Callback matching a device to a fwnode that identifies a PCI
775 *            MSI domain.
776 *
777 * This should be called by the irqchip driver that is the parent of the
778 * MSI domain, to provide a callback interface for querying the fwnode.
779 */
780void
781pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
782{
783	pci_msi_get_fwnode_cb = fn;
784}
785
786/**
787 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
788 * @bus:      The PCI host bridge bus.
789 *
790 * This function uses the callback function registered by
791 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
792 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
793 * This returns NULL on error or when the domain is not found.
794 */
795struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
796{
797	struct fwnode_handle *fwnode;
798
799	if (!pci_msi_get_fwnode_cb)
800		return NULL;
801
802	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
803	if (!fwnode)
804		return NULL;
805
806	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
807}
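/*
 * A hedged sketch of the provider side (my_pci_msi_get_fwnode is a
 * hypothetical name): the irqchip driver that parents the PCI MSI domain
 * registers its lookup routine once during init, roughly:
 *
 *	static struct fwnode_handle *my_pci_msi_get_fwnode(struct device *dev)
 *	{
 *		return my_msi_fwnode;	(a driver-specific lookup)
 *	}
 *
 *	pci_msi_register_fwnode_provider(&my_pci_msi_get_fwnode);
 */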
808
809static int __init acpi_pci_init(void)
810{
811	int ret;
812
813	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
814		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
815		pci_no_msi();
816	}
817
818	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
819		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
820		pcie_no_aspm();
821	}
822
823	ret = register_acpi_bus_type(&acpi_pci_bus);
824	if (ret)
825		return 0;
826
827	pci_set_platform_pm(&acpi_pci_platform_pm);
828	acpi_pci_slot_init();
829	acpiphp_init();
830
831	return 0;
832}
833arch_initcall(acpi_pci_init);