v4.6
  1/*
  2 * File:	pci-acpi.c
  3 * Purpose:	Provide PCI support in ACPI
  4 *
  5 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
  6 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
  7 * Copyright (C) 2004 Intel Corp.
  8 */
  9
 10#include <linux/delay.h>
 11#include <linux/init.h>
 12#include <linux/irqdomain.h>
 13#include <linux/pci.h>
 14#include <linux/msi.h>
 15#include <linux/pci_hotplug.h>
 16#include <linux/module.h>
 17#include <linux/pci-aspm.h>
 18#include <linux/pci-acpi.h>
 19#include <linux/pm_runtime.h>
 20#include <linux/pm_qos.h>
 21#include "pci.h"
 22
 23/*
 24 * The UUID is defined in the PCI Firmware Specification available here:
 25 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 26 */
 27const u8 pci_acpi_dsm_uuid[] = {
 28	0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
 29	0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
 30};
 31
 32phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
 33{
 34	acpi_status status = AE_NOT_EXIST;
 35	unsigned long long mcfg_addr;
 36
 37	if (handle)
 38		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
 39					       NULL, &mcfg_addr);
 40	if (ACPI_FAILURE(status))
 41		return 0;
 42
 43	return (phys_addr_t)mcfg_addr;
 44}
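Per the PCI Firmware Specification, _CBA evaluates to the base address of
the root bridge's memory-mapped (ECAM) configuration region.  A hedged
usage sketch, assuming an acpi_pci_root *root as used by the ACPI PCI root
driver:

	phys_addr_t mcfg = acpi_pci_root_get_mcfg_addr(root->device->handle);
	if (!mcfg)
		; /* no _CBA method: rely on the static MCFG table instead */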
 45
 46static acpi_status decode_type0_hpx_record(union acpi_object *record,
 47					   struct hotplug_params *hpx)
 48{
 49	int i;
 50	union acpi_object *fields = record->package.elements;
 51	u32 revision = fields[1].integer.value;
 52
 53	switch (revision) {
 54	case 1:
 55		if (record->package.count != 6)
 56			return AE_ERROR;
 57		for (i = 2; i < 6; i++)
 58			if (fields[i].type != ACPI_TYPE_INTEGER)
 59				return AE_ERROR;
 60		hpx->t0 = &hpx->type0_data;
 61		hpx->t0->revision        = revision;
 62		hpx->t0->cache_line_size = fields[2].integer.value;
 63		hpx->t0->latency_timer   = fields[3].integer.value;
 64		hpx->t0->enable_serr     = fields[4].integer.value;
 65		hpx->t0->enable_perr     = fields[5].integer.value;
 66		break;
 67	default:
 68		printk(KERN_WARNING
 69		       "%s: Type 0 Revision %d record not supported\n",
 70		       __func__, revision);
 71		return AE_ERROR;
 72	}
 73	return AE_OK;
 74}
 75
 76static acpi_status decode_type1_hpx_record(union acpi_object *record,
 77					   struct hotplug_params *hpx)
 78{
 79	int i;
 80	union acpi_object *fields = record->package.elements;
 81	u32 revision = fields[1].integer.value;
 82
 83	switch (revision) {
 84	case 1:
 85		if (record->package.count != 5)
 86			return AE_ERROR;
 87		for (i = 2; i < 5; i++)
 88			if (fields[i].type != ACPI_TYPE_INTEGER)
 89				return AE_ERROR;
 90		hpx->t1 = &hpx->type1_data;
 91		hpx->t1->revision      = revision;
 92		hpx->t1->max_mem_read  = fields[2].integer.value;
 93		hpx->t1->avg_max_split = fields[3].integer.value;
 94		hpx->t1->tot_max_split = fields[4].integer.value;
 95		break;
 96	default:
 97		printk(KERN_WARNING
 98		       "%s: Type 1 Revision %d record not supported\n",
 99		       __func__, revision);
100		return AE_ERROR;
101	}
102	return AE_OK;
103}
104
105static acpi_status decode_type2_hpx_record(union acpi_object *record,
106					   struct hotplug_params *hpx)
107{
108	int i;
109	union acpi_object *fields = record->package.elements;
110	u32 revision = fields[1].integer.value;
111
112	switch (revision) {
113	case 1:
114		if (record->package.count != 18)
115			return AE_ERROR;
116		for (i = 2; i < 18; i++)
117			if (fields[i].type != ACPI_TYPE_INTEGER)
118				return AE_ERROR;
119		hpx->t2 = &hpx->type2_data;
120		hpx->t2->revision      = revision;
121		hpx->t2->unc_err_mask_and      = fields[2].integer.value;
122		hpx->t2->unc_err_mask_or       = fields[3].integer.value;
123		hpx->t2->unc_err_sever_and     = fields[4].integer.value;
124		hpx->t2->unc_err_sever_or      = fields[5].integer.value;
125		hpx->t2->cor_err_mask_and      = fields[6].integer.value;
126		hpx->t2->cor_err_mask_or       = fields[7].integer.value;
127		hpx->t2->adv_err_cap_and       = fields[8].integer.value;
128		hpx->t2->adv_err_cap_or        = fields[9].integer.value;
129		hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
130		hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
131		hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
132		hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
133		hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
134		hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
135		hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
136		hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
137		break;
138	default:
139		printk(KERN_WARNING
140		       "%s: Type 2 Revision %d record not supported\n",
141		       __func__, revision);
142		return AE_ERROR;
143	}
144	return AE_OK;
145}
146
147static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
148{
149	acpi_status status;
150	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
151	union acpi_object *package, *record, *fields;
152	u32 type;
153	int i;
154
155	/* Clear the return buffer with zeros */
156	memset(hpx, 0, sizeof(struct hotplug_params));
157
158	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
159	if (ACPI_FAILURE(status))
160		return status;
161
162	package = (union acpi_object *)buffer.pointer;
163	if (package->type != ACPI_TYPE_PACKAGE) {
164		status = AE_ERROR;
165		goto exit;
166	}
167
168	for (i = 0; i < package->package.count; i++) {
169		record = &package->package.elements[i];
170		if (record->type != ACPI_TYPE_PACKAGE) {
171			status = AE_ERROR;
172			goto exit;
173		}
174
175		fields = record->package.elements;
176		if (fields[0].type != ACPI_TYPE_INTEGER ||
177		    fields[1].type != ACPI_TYPE_INTEGER) {
178			status = AE_ERROR;
179			goto exit;
180		}
181
182		type = fields[0].integer.value;
183		switch (type) {
184		case 0:
185			status = decode_type0_hpx_record(record, hpx);
186			if (ACPI_FAILURE(status))
187				goto exit;
188			break;
189		case 1:
190			status = decode_type1_hpx_record(record, hpx);
191			if (ACPI_FAILURE(status))
192				goto exit;
193			break;
194		case 2:
195			status = decode_type2_hpx_record(record, hpx);
196			if (ACPI_FAILURE(status))
197				goto exit;
198			break;
199		default:
200			printk(KERN_ERR "%s: Type %d record not supported\n",
201			       __func__, type);
202			status = AE_ERROR;
203			goto exit;
204		}
205	}
206 exit:
207	kfree(buffer.pointer);
208	return status;
209}
210
211static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
212{
213	acpi_status status;
214	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
215	union acpi_object *package, *fields;
216	int i;
217
218	memset(hpp, 0, sizeof(struct hotplug_params));
219
220	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
221	if (ACPI_FAILURE(status))
222		return status;
223
224	package = (union acpi_object *) buffer.pointer;
225	if (package->type != ACPI_TYPE_PACKAGE ||
226	    package->package.count != 4) {
227		status = AE_ERROR;
228		goto exit;
229	}
230
231	fields = package->package.elements;
232	for (i = 0; i < 4; i++) {
233		if (fields[i].type != ACPI_TYPE_INTEGER) {
234			status = AE_ERROR;
235			goto exit;
236		}
237	}
238
239	hpp->t0 = &hpp->type0_data;
240	hpp->t0->revision        = 1;
241	hpp->t0->cache_line_size = fields[0].integer.value;
242	hpp->t0->latency_timer   = fields[1].integer.value;
243	hpp->t0->enable_serr     = fields[2].integer.value;
244	hpp->t0->enable_perr     = fields[3].integer.value;
245
246exit:
247	kfree(buffer.pointer);
248	return status;
249}
250
251/* pci_get_hp_params
252 *
253 * @dev - the pci_dev for which we want parameters
254 * @hpp - allocated by the caller
255 */
256int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
257{
258	acpi_status status;
259	acpi_handle handle, phandle;
260	struct pci_bus *pbus;
261
262	if (acpi_pci_disabled)
263		return -ENODEV;
264
265	handle = NULL;
266	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
267		handle = acpi_pci_get_bridge_handle(pbus);
268		if (handle)
269			break;
270	}
271
272	/*
273	 * _HPP settings apply to all child buses, until another _HPP is
274	 * encountered. If we don't find an _HPP for the input pci dev,
275	 * look for it in the parent device scope since that would apply to
276	 * this pci dev.
277	 */
278	while (handle) {
279		status = acpi_run_hpx(handle, hpp);
280		if (ACPI_SUCCESS(status))
281			return 0;
282		status = acpi_run_hpp(handle, hpp);
283		if (ACPI_SUCCESS(status))
284			return 0;
285		if (acpi_is_root_bridge(handle))
286			break;
287		status = acpi_get_parent(handle, &phandle);
288		if (ACPI_FAILURE(status))
289			break;
290		handle = phandle;
291	}
292	return -ENODEV;
293}
294EXPORT_SYMBOL_GPL(pci_get_hp_params);
295
296/**
 297 * pci_acpi_wake_bus - Root bus wakeup notification work function.
298 * @work: Work item to handle.
299 */
300static void pci_acpi_wake_bus(struct work_struct *work)
301{
302	struct acpi_device *adev;
303	struct acpi_pci_root *root;
304
305	adev = container_of(work, struct acpi_device, wakeup.context.work);
306	root = acpi_driver_data(adev);
307	pci_pme_wakeup_bus(root->bus);
308}
309
310/**
311 * pci_acpi_wake_dev - PCI device wakeup notification work function.
313 * @work: Work item to handle.
314 */
315static void pci_acpi_wake_dev(struct work_struct *work)
316{
317	struct acpi_device_wakeup_context *context;
318	struct pci_dev *pci_dev;
319
320	context = container_of(work, struct acpi_device_wakeup_context, work);
321	pci_dev = to_pci_dev(context->dev);
322
323	if (pci_dev->pme_poll)
324		pci_dev->pme_poll = false;
325
326	if (pci_dev->current_state == PCI_D3cold) {
327		pci_wakeup_event(pci_dev);
328		pm_runtime_resume(&pci_dev->dev);
329		return;
330	}
331
332	/* Clear PME Status if set. */
333	if (pci_dev->pme_support)
334		pci_check_pme_status(pci_dev);
335
336	pci_wakeup_event(pci_dev);
337	pm_runtime_resume(&pci_dev->dev);
338
339	pci_pme_wakeup_bus(pci_dev->subordinate);
340}
341
342/**
343 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
344 * @dev: PCI root bridge ACPI device.
345 */
346acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
347{
348	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
349}
350
351/**
352 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
353 * @dev: ACPI device to add the notifier for.
354 * @pci_dev: PCI device to check for the PME status if an event is signaled.
355 */
356acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
357				     struct pci_dev *pci_dev)
358{
359	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
360}
361
362/*
363 * _SxD returns the D-state with the highest power
364 * (lowest D-state number) supported in the S-state "x".
365 *
 366 * If the device does not have a _PRW
367 * (Power Resources for Wake) supporting system wakeup from "x"
368 * then the OS is free to choose a lower power (higher number
369 * D-state) than the return value from _SxD.
370 *
371 * But if _PRW is enabled at S-state "x", the OS
372 * must not choose a power lower than _SxD --
373 * unless the device has an _SxW method specifying
374 * the lowest power (highest D-state number) the device
375 * may enter while still able to wake the system.
376 *
 377 * i.e. depending on global OS policy:
378 *
379 * if (_PRW at S-state x)
380 *	choose from highest power _SxD to lowest power _SxW
381 * else // no _PRW at S-state x
382 *	choose highest power _SxD or any lower power
383 */
384
385static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
386{
387	int acpi_state, d_max;
388
389	if (pdev->no_d3cold)
390		d_max = ACPI_STATE_D3_HOT;
391	else
392		d_max = ACPI_STATE_D3_COLD;
393	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
394	if (acpi_state < 0)
395		return PCI_POWER_ERROR;
396
397	switch (acpi_state) {
398	case ACPI_STATE_D0:
399		return PCI_D0;
400	case ACPI_STATE_D1:
401		return PCI_D1;
402	case ACPI_STATE_D2:
403		return PCI_D2;
404	case ACPI_STATE_D3_HOT:
405		return PCI_D3hot;
406	case ACPI_STATE_D3_COLD:
407		return PCI_D3cold;
408	}
409	return PCI_POWER_ERROR;
410}
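A worked example of the _SxD/_SxW policy described above, with values
invented for illustration:

	/*
	 * _S3D = 2 and a _PRW valid in S3 with _S3W = 3:
	 *	the OS may choose anything from D2 down to D3hot.
	 * _S3D = 2 and no _PRW for S3:
	 *	the OS may choose D2 or any deeper D-state.
	 */

Note that acpi_pci_choose_state() additionally clamps the result through
d_max, so a device with no_d3cold set is never reported as PCI_D3cold.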
411
412static bool acpi_pci_power_manageable(struct pci_dev *dev)
413{
414	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
415	return adev ? acpi_device_power_manageable(adev) : false;
416}
417
418static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
419{
420	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
421	static const u8 state_conv[] = {
422		[PCI_D0] = ACPI_STATE_D0,
423		[PCI_D1] = ACPI_STATE_D1,
424		[PCI_D2] = ACPI_STATE_D2,
425		[PCI_D3hot] = ACPI_STATE_D3_HOT,
426		[PCI_D3cold] = ACPI_STATE_D3_COLD,
427	};
428	int error = -EINVAL;
429
430	/* If the ACPI device has _EJ0, ignore the device */
431	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
432		return -ENODEV;
433
434	switch (state) {
435	case PCI_D3cold:
436		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
437				PM_QOS_FLAGS_ALL) {
438			error = -EBUSY;
439			break;
440		}
441	case PCI_D0:
442	case PCI_D1:
443	case PCI_D2:
444	case PCI_D3hot:
445		error = acpi_device_set_power(adev, state_conv[state]);
446	}
447
448	if (!error)
449		dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
450			 acpi_power_state_string(state_conv[state]));
451
452	return error;
453}
454
455static bool acpi_pci_can_wakeup(struct pci_dev *dev)
456{
457	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
458	return adev ? acpi_device_can_wakeup(adev) : false;
459}
460
461static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
462{
463	while (bus->parent) {
464		if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
465			return;
466		bus = bus->parent;
467	}
468
469	/* We have reached the root bus. */
470	if (bus->bridge)
471		acpi_pm_device_sleep_wake(bus->bridge, enable);
472}
473
474static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
475{
476	if (acpi_pci_can_wakeup(dev))
477		return acpi_pm_device_sleep_wake(&dev->dev, enable);
478
479	acpi_pci_propagate_wakeup_enable(dev->bus, enable);
480	return 0;
481}
482
483static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
484{
485	while (bus->parent) {
486		struct pci_dev *bridge = bus->self;
487
488		if (bridge->pme_interrupt)
489			return;
490		if (!acpi_pm_device_run_wake(&bridge->dev, enable))
491			return;
492		bus = bus->parent;
493	}
494
495	/* We have reached the root bus. */
496	if (bus->bridge)
497		acpi_pm_device_run_wake(bus->bridge, enable);
498}
499
500static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
501{
502	/*
503	 * Per PCI Express Base Specification Revision 2.0 section
504	 * 5.3.3.2 Link Wakeup, platform support is needed for D3cold
505	 * waking up to power on the main link even if there is PME
506	 * support for D3cold
507	 */
508	if (dev->pme_interrupt && !dev->runtime_d3cold)
509		return 0;
510
511	if (!acpi_pm_device_run_wake(&dev->dev, enable))
512		return 0;
513
514	acpi_pci_propagate_run_wake(dev->bus, enable);
515	return 0;
516}
517
518static bool acpi_pci_need_resume(struct pci_dev *dev)
519{
520	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
521
522	if (!adev || !acpi_device_power_manageable(adev))
523		return false;
524
525	if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
526		return true;
527
528	if (acpi_target_system_state() == ACPI_STATE_S0)
529		return false;
530
531	return !!adev->power.flags.dsw_present;
532}
533
534static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
535	.is_manageable = acpi_pci_power_manageable,
536	.set_state = acpi_pci_set_power_state,
537	.choose_state = acpi_pci_choose_state,
538	.sleep_wake = acpi_pci_sleep_wake,
539	.run_wake = acpi_pci_run_wake,
540	.need_resume = acpi_pci_need_resume,
541};
542
543void acpi_pci_add_bus(struct pci_bus *bus)
544{
545	union acpi_object *obj;
546	struct pci_host_bridge *bridge;
547
548	if (acpi_pci_disabled || !bus->bridge)
549		return;
550
551	acpi_pci_slot_enumerate(bus);
552	acpiphp_enumerate_slots(bus);
553
554	/*
555	 * For a host bridge, check its _DSM for function 8 and if
556	 * that is available, mark it in pci_host_bridge.
557	 */
558	if (!pci_is_root_bus(bus))
559		return;
560
561	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
562				RESET_DELAY_DSM, NULL);
563	if (!obj)
564		return;
565
566	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
567		bridge = pci_find_host_bridge(bus);
568		bridge->ignore_reset_delay = 1;
569	}
570	ACPI_FREE(obj);
571}
572
573void acpi_pci_remove_bus(struct pci_bus *bus)
574{
575	if (acpi_pci_disabled || !bus->bridge)
576		return;
577
578	acpiphp_remove_slots(bus);
579	acpi_pci_slot_remove(bus);
580}
581
582/* ACPI bus type */
583static struct acpi_device *acpi_pci_find_companion(struct device *dev)
584{
585	struct pci_dev *pci_dev = to_pci_dev(dev);
586	bool check_children;
587	u64 addr;
588
589	check_children = pci_is_bridge(pci_dev);
590	/* Please ref to ACPI spec for the syntax of _ADR */
591	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
592	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
593				      check_children);
594}
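The _ADR value computed above packs the device (slot) number into the high
word and the function number into the low word.  A quick sketch for a
device at 00:1f.3, i.e. devfn 0xfb:

	u64 addr = (PCI_SLOT(0xfb) << 16) | PCI_FUNC(0xfb);
	/* PCI_SLOT(0xfb) == 0x1f, PCI_FUNC(0xfb) == 0x3: addr == 0x001f0003 */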
595
596/**
597 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
598 * @pdev: the PCI device whose delay is to be updated
599 * @handle: ACPI handle of this device
600 *
601 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
602 * control method of either the device itself or the PCI host bridge.
603 *
604 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
605 * host bridge.  If it returns one, the OS may assume that all devices in
606 * the hierarchy have already completed power-on reset delays.
607 *
608 * Function 9, "Device Readiness Durations," applies only to the object
609 * where it is located.  It returns delay durations required after various
610 * events if the device requires less time than the spec requires.  Delays
611 * from this function take precedence over the Reset Delay function.
612 *
613 * These _DSM functions are defined by the draft ECN of January 28, 2014,
614 * titled "ACPI additions for FW latency optimizations."
615 */
616static void pci_acpi_optimize_delay(struct pci_dev *pdev,
617				    acpi_handle handle)
618{
619	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
620	int value;
621	union acpi_object *obj, *elements;
622
623	if (bridge->ignore_reset_delay)
624		pdev->d3cold_delay = 0;
625
626	obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
627				FUNCTION_DELAY_DSM, NULL);
628	if (!obj)
629		return;
630
631	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
632		elements = obj->package.elements;
633		if (elements[0].type == ACPI_TYPE_INTEGER) {
634			value = (int)elements[0].integer.value / 1000;
635			if (value < PCI_PM_D3COLD_WAIT)
636				pdev->d3cold_delay = value;
637		}
638		if (elements[3].type == ACPI_TYPE_INTEGER) {
639			value = (int)elements[3].integer.value / 1000;
640			if (value < PCI_PM_D3_WAIT)
641				pdev->d3_delay = value;
642		}
643	}
644	ACPI_FREE(obj);
645}
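The _DSM "Device Readiness Durations" values are expressed in microseconds,
hence the division by 1000 before comparing against the millisecond
defaults (assumed here to be PCI_PM_D3COLD_WAIT = 100 ms and
PCI_PM_D3_WAIT = 10 ms).  A sketch with an invented duration:

	value = 4500 / 1000;			/* 4 ms after integer division */
	if (value < PCI_PM_D3COLD_WAIT)		/* 4 < 100 */
		pdev->d3cold_delay = value;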
646
647static void pci_acpi_setup(struct device *dev)
648{
649	struct pci_dev *pci_dev = to_pci_dev(dev);
650	struct acpi_device *adev = ACPI_COMPANION(dev);
651
652	if (!adev)
653		return;
654
655	pci_acpi_optimize_delay(pci_dev, adev->handle);
656
657	pci_acpi_add_pm_notifier(adev, pci_dev);
658	if (!adev->wakeup.flags.valid)
659		return;
660
661	device_set_wakeup_capable(dev, true);
662	acpi_pci_sleep_wake(pci_dev, false);
663	if (adev->wakeup.flags.run_wake)
664		device_set_run_wake(dev, true);
665}
666
667static void pci_acpi_cleanup(struct device *dev)
668{
669	struct acpi_device *adev = ACPI_COMPANION(dev);
670
671	if (!adev)
672		return;
673
674	pci_acpi_remove_pm_notifier(adev);
675	if (adev->wakeup.flags.valid) {
676		device_set_wakeup_capable(dev, false);
677		device_set_run_wake(dev, false);
678	}
679}
680
681static bool pci_acpi_bus_match(struct device *dev)
682{
683	return dev_is_pci(dev);
684}
685
686static struct acpi_bus_type acpi_pci_bus = {
687	.name = "PCI",
688	.match = pci_acpi_bus_match,
689	.find_companion = acpi_pci_find_companion,
690	.setup = pci_acpi_setup,
691	.cleanup = pci_acpi_cleanup,
692};
693
694
695static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);
696
697/**
698 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
699 * @fn:       Callback matching a device to a fwnode that identifies a PCI
700 *            MSI domain.
701 *
702 * This should be called by irqchip driver, which is the parent of
703 * the MSI domain to provide callback interface to query fwnode.
704 */
705void
706pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
707{
708	pci_msi_get_fwnode_cb = fn;
709}
710
711/**
712 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
713 * @bus:      The PCI host bridge bus.
714 *
715 * This function uses the callback function registered by
716 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
717 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
718 * This returns NULL on error or when the domain is not found.
719 */
720struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
721{
722	struct fwnode_handle *fwnode;
723
724	if (!pci_msi_get_fwnode_cb)
725		return NULL;
726
727	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
728	if (!fwnode)
729		return NULL;
730
731	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
732}
733
734static int __init acpi_pci_init(void)
735{
736	int ret;
737
738	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
739		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
740		pci_no_msi();
741	}
742
743	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
744		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
745		pcie_no_aspm();
746	}
747
748	ret = register_acpi_bus_type(&acpi_pci_bus);
749	if (ret)
750		return 0;
751
752	pci_set_platform_pm(&acpi_pci_platform_pm);
753	acpi_pci_slot_init();
754	acpiphp_init();
755
756	return 0;
757}
758arch_initcall(acpi_pci_init);
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI support in ACPI
   4 *
   5 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
   6 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
   7 * Copyright (C) 2004 Intel Corp.
   8 */
   9
  10#include <linux/delay.h>
  11#include <linux/init.h>
  12#include <linux/irqdomain.h>
  13#include <linux/pci.h>
  14#include <linux/msi.h>
  15#include <linux/pci_hotplug.h>
  16#include <linux/module.h>
  17#include <linux/pci-acpi.h>
  18#include <linux/pm_runtime.h>
  19#include <linux/pm_qos.h>
  20#include <linux/rwsem.h>
  21#include "pci.h"
  22
  23/*
  24 * The GUID is defined in the PCI Firmware Specification available
  25 * here to PCI-SIG members:
  26 * https://members.pcisig.com/wg/PCI-SIG/document/15350
  27 */
  28const guid_t pci_acpi_dsm_guid =
  29	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
  30		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
  31
  32#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
  33static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
  34{
  35	struct device *dev = &adev->dev;
  36	struct resource_entry *entry;
  37	struct list_head list;
  38	unsigned long flags;
  39	int ret;
  40
  41	INIT_LIST_HEAD(&list);
  42	flags = IORESOURCE_MEM;
  43	ret = acpi_dev_get_resources(adev, &list,
  44				     acpi_dev_filter_resource_type_cb,
  45				     (void *) flags);
  46	if (ret < 0) {
  47		dev_err(dev, "failed to parse _CRS method, error code %d\n",
  48			ret);
  49		return ret;
  50	}
  51
  52	if (ret == 0) {
  53		dev_err(dev, "no IO and memory resources present in _CRS\n");
  54		return -EINVAL;
  55	}
  56
  57	entry = list_first_entry(&list, struct resource_entry, node);
  58	*res = *entry->res;
  59	acpi_dev_free_resource_list(&list);
  60	return 0;
  61}
  62
  63static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
  64				 void **retval)
  65{
  66	u16 *segment = context;
  67	unsigned long long uid;
  68	acpi_status status;
  69
  70	status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
  71	if (ACPI_FAILURE(status) || uid != *segment)
  72		return AE_CTRL_DEPTH;
  73
  74	*(acpi_handle *)retval = handle;
  75	return AE_CTRL_TERMINATE;
  76}
  77
  78int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
  79			  struct resource *res)
  80{
  81	struct acpi_device *adev;
  82	acpi_status status;
  83	acpi_handle handle;
  84	int ret;
  85
  86	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
  87	if (ACPI_FAILURE(status)) {
  88		dev_err(dev, "can't find _HID %s device to locate resources\n",
  89			hid);
  90		return -ENODEV;
  91	}
  92
  93	adev = acpi_fetch_acpi_dev(handle);
  94	if (!adev)
  95		return -ENODEV;
  96
  97	ret = acpi_get_rc_addr(adev, res);
  98	if (ret) {
  99		dev_err(dev, "can't get resource from %s\n",
 100			dev_name(&adev->dev));
 101		return ret;
 102	}
 103
 104	return 0;
 105}
 106#endif
 107
 108phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
 109{
 110	acpi_status status = AE_NOT_EXIST;
 111	unsigned long long mcfg_addr;
 112
 113	if (handle)
 114		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
 115					       NULL, &mcfg_addr);
 116	if (ACPI_FAILURE(status))
 117		return 0;
 118
 119	return (phys_addr_t)mcfg_addr;
 120}
 121
 122/* _HPX PCI Setting Record (Type 0); same as _HPP */
 123struct hpx_type0 {
 124	u32 revision;		/* Not present in _HPP */
 125	u8  cache_line_size;	/* Not applicable to PCIe */
 126	u8  latency_timer;	/* Not applicable to PCIe */
 127	u8  enable_serr;
 128	u8  enable_perr;
 129};
 130
 131static struct hpx_type0 pci_default_type0 = {
 132	.revision = 1,
 133	.cache_line_size = 8,
 134	.latency_timer = 0x40,
 135	.enable_serr = 0,
 136	.enable_perr = 0,
 137};
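These defaults mirror conventional PCI configuration values.  Note that the
Cache Line Size register is programmed in DWORD units, so this sketch of
what program_hpx_type0() below ends up writing configures a 32-byte line:

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);	/* 8 DWORDs = 32 bytes */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);	/* 64 PCI bus clocks */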
 138
 139static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
 140{
 141	u16 pci_cmd, pci_bctl;
 142
 143	if (!hpx)
 144		hpx = &pci_default_type0;
 145
 146	if (hpx->revision > 1) {
 147		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
 148			 hpx->revision);
 149		hpx = &pci_default_type0;
 150	}
 151
 152	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
 153	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
 154	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
 155	if (hpx->enable_serr)
 156		pci_cmd |= PCI_COMMAND_SERR;
 157	if (hpx->enable_perr)
 158		pci_cmd |= PCI_COMMAND_PARITY;
 159	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
 160
 161	/* Program bridge control value */
 162	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
 163		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
 164				      hpx->latency_timer);
 165		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
 166		if (hpx->enable_perr)
 167			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
 168		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
 169	}
 170}
 171
 172static acpi_status decode_type0_hpx_record(union acpi_object *record,
 173					   struct hpx_type0 *hpx0)
 174{
 175	int i;
 176	union acpi_object *fields = record->package.elements;
 177	u32 revision = fields[1].integer.value;
 178
 179	switch (revision) {
 180	case 1:
 181		if (record->package.count != 6)
 182			return AE_ERROR;
 183		for (i = 2; i < 6; i++)
 184			if (fields[i].type != ACPI_TYPE_INTEGER)
 185				return AE_ERROR;
 186		hpx0->revision        = revision;
 187		hpx0->cache_line_size = fields[2].integer.value;
 188		hpx0->latency_timer   = fields[3].integer.value;
 189		hpx0->enable_serr     = fields[4].integer.value;
 190		hpx0->enable_perr     = fields[5].integer.value;
 191		break;
 192	default:
 193		pr_warn("%s: Type 0 Revision %d record not supported\n",
 194		       __func__, revision);
 195		return AE_ERROR;
 196	}
 197	return AE_OK;
 198}
 199
 200/* _HPX PCI-X Setting Record (Type 1) */
 201struct hpx_type1 {
 202	u32 revision;
 203	u8  max_mem_read;
 204	u8  avg_max_split;
 205	u16 tot_max_split;
 206};
 207
 208static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
 209{
 210	int pos;
 211
 212	if (!hpx)
 213		return;
 214
 215	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 216	if (!pos)
 217		return;
 218
 219	pci_warn(dev, "PCI-X settings not supported\n");
 220}
 221
 222static acpi_status decode_type1_hpx_record(union acpi_object *record,
 223					   struct hpx_type1 *hpx1)
 224{
 225	int i;
 226	union acpi_object *fields = record->package.elements;
 227	u32 revision = fields[1].integer.value;
 228
 229	switch (revision) {
 230	case 1:
 231		if (record->package.count != 5)
 232			return AE_ERROR;
 233		for (i = 2; i < 5; i++)
 234			if (fields[i].type != ACPI_TYPE_INTEGER)
 235				return AE_ERROR;
 236		hpx1->revision      = revision;
 237		hpx1->max_mem_read  = fields[2].integer.value;
 238		hpx1->avg_max_split = fields[3].integer.value;
 239		hpx1->tot_max_split = fields[4].integer.value;
 240		break;
 241	default:
 242		pr_warn("%s: Type 1 Revision %d record not supported\n",
 243		       __func__, revision);
 244		return AE_ERROR;
 245	}
 246	return AE_OK;
 247}
 248
 249static bool pcie_root_rcb_set(struct pci_dev *dev)
 250{
 251	struct pci_dev *rp = pcie_find_root_port(dev);
 252	u16 lnkctl;
 253
 254	if (!rp)
 255		return false;
 256
 257	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
 258	if (lnkctl & PCI_EXP_LNKCTL_RCB)
 259		return true;
 260
 261	return false;
 262}
 263
 264/* _HPX PCI Express Setting Record (Type 2) */
 265struct hpx_type2 {
 266	u32 revision;
 267	u32 unc_err_mask_and;
 268	u32 unc_err_mask_or;
 269	u32 unc_err_sever_and;
 270	u32 unc_err_sever_or;
 271	u32 cor_err_mask_and;
 272	u32 cor_err_mask_or;
 273	u32 adv_err_cap_and;
 274	u32 adv_err_cap_or;
 275	u16 pci_exp_devctl_and;
 276	u16 pci_exp_devctl_or;
 277	u16 pci_exp_lnkctl_and;
 278	u16 pci_exp_lnkctl_or;
 279	u32 sec_unc_err_sever_and;
 280	u32 sec_unc_err_sever_or;
 281	u32 sec_unc_err_mask_and;
 282	u32 sec_unc_err_mask_or;
 283};
 284
 285static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
 286{
 287	int pos;
 288	u32 reg32;
 289
 290	if (!hpx)
 291		return;
 292
 293	if (!pci_is_pcie(dev))
 294		return;
 295
 296	if (hpx->revision > 1) {
 297		pci_warn(dev, "PCIe settings rev %d not supported\n",
 298			 hpx->revision);
 299		return;
 300	}
 301
 302	/*
 303	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
 304	 * those to make sure they're consistent with the rest of the
 305	 * platform.
 306	 */
 307	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
 308				    PCI_EXP_DEVCTL_READRQ;
 309	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
 310				    PCI_EXP_DEVCTL_READRQ);
 311
 312	/* Initialize Device Control Register */
 313	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
 314			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);
 315
 316	/* Initialize Link Control Register */
 317	if (pcie_cap_has_lnkctl(dev)) {
 318
 319		/*
 320		 * If the Root Port supports Read Completion Boundary of
 321		 * 128, set RCB to 128.  Otherwise, clear it.
 322		 */
 323		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
 324		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
 325		if (pcie_root_rcb_set(dev))
 326			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
 327
 328		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 329			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
 330	}
 331
 332	/* Find Advanced Error Reporting Enhanced Capability */
 333	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 334	if (!pos)
 335		return;
 336
 337	/* Initialize Uncorrectable Error Mask Register */
 338	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
 339	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
 340	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
 341
 342	/* Initialize Uncorrectable Error Severity Register */
 343	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
 344	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
 345	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
 346
 347	/* Initialize Correctable Error Mask Register */
 348	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
 349	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
 350	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
 351
 352	/* Initialize Advanced Error Capabilities and Control Register */
 353	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
 354	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;
 355
 356	/* Don't enable ECRC generation or checking if unsupported */
 357	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
 358		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
 359	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
 360		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
 361	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
 362
 363	/*
 364	 * FIXME: The following two registers are not supported yet.
 365	 *
 366	 *   o Secondary Uncorrectable Error Severity Register
 367	 *   o Secondary Uncorrectable Error Mask Register
 368	 */
 369}
 370
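Each _HPX type 2 AND/OR pair is applied as a read-modify-write, as the AER
register updates above show:

	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;

A set bit in the AND mask preserves the device's current bit and a set bit
in the OR mask forces it on, which is why the MPS/MRRS bits are OR-ed into
pci_exp_devctl_and and cleared from pci_exp_devctl_or at the top of the
function: those fields stay under kernel control.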
 371static acpi_status decode_type2_hpx_record(union acpi_object *record,
 372					   struct hpx_type2 *hpx2)
 373{
 374	int i;
 375	union acpi_object *fields = record->package.elements;
 376	u32 revision = fields[1].integer.value;
 377
 378	switch (revision) {
 379	case 1:
 380		if (record->package.count != 18)
 381			return AE_ERROR;
 382		for (i = 2; i < 18; i++)
 383			if (fields[i].type != ACPI_TYPE_INTEGER)
 384				return AE_ERROR;
 385		hpx2->revision      = revision;
 386		hpx2->unc_err_mask_and      = fields[2].integer.value;
 387		hpx2->unc_err_mask_or       = fields[3].integer.value;
 388		hpx2->unc_err_sever_and     = fields[4].integer.value;
 389		hpx2->unc_err_sever_or      = fields[5].integer.value;
 390		hpx2->cor_err_mask_and      = fields[6].integer.value;
 391		hpx2->cor_err_mask_or       = fields[7].integer.value;
 392		hpx2->adv_err_cap_and       = fields[8].integer.value;
 393		hpx2->adv_err_cap_or        = fields[9].integer.value;
 394		hpx2->pci_exp_devctl_and    = fields[10].integer.value;
 395		hpx2->pci_exp_devctl_or     = fields[11].integer.value;
 396		hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
 397		hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
 398		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
 399		hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
 400		hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
 401		hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
 402		break;
 403	default:
 404		pr_warn("%s: Type 2 Revision %d record not supported\n",
 405		       __func__, revision);
 406		return AE_ERROR;
 407	}
 408	return AE_OK;
 409}
 410
 411/* _HPX PCI Express Setting Record (Type 3) */
 412struct hpx_type3 {
 413	u16 device_type;
 414	u16 function_type;
 415	u16 config_space_location;
 416	u16 pci_exp_cap_id;
 417	u16 pci_exp_cap_ver;
 418	u16 pci_exp_vendor_id;
 419	u16 dvsec_id;
 420	u16 dvsec_rev;
 421	u16 match_offset;
 422	u32 match_mask_and;
 423	u32 match_value;
 424	u16 reg_offset;
 425	u32 reg_mask_and;
 426	u32 reg_mask_or;
 427};
 428
 429enum hpx_type3_dev_type {
 430	HPX_TYPE_ENDPOINT	= BIT(0),
 431	HPX_TYPE_LEG_END	= BIT(1),
 432	HPX_TYPE_RC_END		= BIT(2),
 433	HPX_TYPE_RC_EC		= BIT(3),
 434	HPX_TYPE_ROOT_PORT	= BIT(4),
 435	HPX_TYPE_UPSTREAM	= BIT(5),
 436	HPX_TYPE_DOWNSTREAM	= BIT(6),
 437	HPX_TYPE_PCI_BRIDGE	= BIT(7),
 438	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
 439};
 440
 441static u16 hpx3_device_type(struct pci_dev *dev)
 442{
 443	u16 pcie_type = pci_pcie_type(dev);
 444	static const int pcie_to_hpx3_type[] = {
 445		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
 446		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
 447		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
 448		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
 449		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
 450		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
 451		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
 452		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
 453		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
 454	};
 455
 456	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
 457		return 0;
 458
 459	return pcie_to_hpx3_type[pcie_type];
 460}
 461
 462enum hpx_type3_fn_type {
 463	HPX_FN_NORMAL		= BIT(0),
 464	HPX_FN_SRIOV_PHYS	= BIT(1),
 465	HPX_FN_SRIOV_VIRT	= BIT(2),
 466};
 467
 468static u8 hpx3_function_type(struct pci_dev *dev)
 469{
 470	if (dev->is_virtfn)
 471		return HPX_FN_SRIOV_VIRT;
 472	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
 473		return HPX_FN_SRIOV_PHYS;
 474	else
 475		return HPX_FN_NORMAL;
 476}
 477
 478static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
 479{
 480	u8 cap_ver = hpx3_cap_id & 0xf;
 481
 482	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
 483		return true;
 484	else if (cap_ver == pcie_cap_id)
 485		return true;
 486
 487	return false;
 488}
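Bit 4 of the _HPX3 capability-version field means "this version or newer".
Some illustrative calls, with invented values:

	hpx3_cap_ver_matches(2, 0x13);	/* true: BIT(4) set and 3 >= 2 */
	hpx3_cap_ver_matches(2, 0x02);	/* true: exact version match */
	hpx3_cap_ver_matches(2, 0x01);	/* false: no flag and 1 != 2 */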
 489
 490enum hpx_type3_cfg_loc {
 491	HPX_CFG_PCICFG		= 0,
 492	HPX_CFG_PCIE_CAP	= 1,
 493	HPX_CFG_PCIE_CAP_EXT	= 2,
 494	HPX_CFG_VEND_CAP	= 3,
 495	HPX_CFG_DVSEC		= 4,
 496	HPX_CFG_MAX,
 497};
 498
 499static void program_hpx_type3_register(struct pci_dev *dev,
 500				       const struct hpx_type3 *reg)
 501{
 502	u32 match_reg, write_reg, header, orig_value;
 503	u16 pos;
 504
 505	if (!(hpx3_device_type(dev) & reg->device_type))
 506		return;
 507
 508	if (!(hpx3_function_type(dev) & reg->function_type))
 509		return;
 510
 511	switch (reg->config_space_location) {
 512	case HPX_CFG_PCICFG:
 513		pos = 0;
 514		break;
 515	case HPX_CFG_PCIE_CAP:
 516		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
 517		if (pos == 0)
 518			return;
 519
 520		break;
 521	case HPX_CFG_PCIE_CAP_EXT:
 522		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
 523		if (pos == 0)
 524			return;
 525
 526		pci_read_config_dword(dev, pos, &header);
 527		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
 528					  reg->pci_exp_cap_ver))
 529			return;
 530
 531		break;
 532	case HPX_CFG_VEND_CAP:
 533	case HPX_CFG_DVSEC:
 534	default:
 535		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
 536		return;
 537	}
 538
 539	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
 540
 541	if ((match_reg & reg->match_mask_and) != reg->match_value)
 542		return;
 543
 544	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
 545	orig_value = write_reg;
 546	write_reg &= reg->reg_mask_and;
 547	write_reg |= reg->reg_mask_or;
 548
 549	if (orig_value == write_reg)
 550		return;
 551
 552	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
 553
 554	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
 555		pos, orig_value, write_reg);
 556}
 557
 558static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
 559{
 560	if (!hpx)
 561		return;
 562
 563	if (!pci_is_pcie(dev))
 564		return;
 565
 566	program_hpx_type3_register(dev, hpx);
 567}
 568
 569static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
 570				union acpi_object *reg_fields)
 571{
 572	hpx3_reg->device_type            = reg_fields[0].integer.value;
 573	hpx3_reg->function_type          = reg_fields[1].integer.value;
 574	hpx3_reg->config_space_location  = reg_fields[2].integer.value;
 575	hpx3_reg->pci_exp_cap_id         = reg_fields[3].integer.value;
 576	hpx3_reg->pci_exp_cap_ver        = reg_fields[4].integer.value;
 577	hpx3_reg->pci_exp_vendor_id      = reg_fields[5].integer.value;
 578	hpx3_reg->dvsec_id               = reg_fields[6].integer.value;
 579	hpx3_reg->dvsec_rev              = reg_fields[7].integer.value;
 580	hpx3_reg->match_offset           = reg_fields[8].integer.value;
 581	hpx3_reg->match_mask_and         = reg_fields[9].integer.value;
 582	hpx3_reg->match_value            = reg_fields[10].integer.value;
 583	hpx3_reg->reg_offset             = reg_fields[11].integer.value;
 584	hpx3_reg->reg_mask_and           = reg_fields[12].integer.value;
 585	hpx3_reg->reg_mask_or            = reg_fields[13].integer.value;
 586}
 587
 588static acpi_status program_type3_hpx_record(struct pci_dev *dev,
 589					   union acpi_object *record)
 590{
 591	union acpi_object *fields = record->package.elements;
 592	u32 desc_count, expected_length, revision;
 593	union acpi_object *reg_fields;
 594	struct hpx_type3 hpx3;
 595	int i;
 596
 597	revision = fields[1].integer.value;
 598	switch (revision) {
 599	case 1:
 600		desc_count = fields[2].integer.value;
 601		expected_length = 3 + desc_count * 14;
 602
 603		if (record->package.count != expected_length)
 604			return AE_ERROR;
 605
 606		for (i = 2; i < expected_length; i++)
 607			if (fields[i].type != ACPI_TYPE_INTEGER)
 608				return AE_ERROR;
 609
 610		for (i = 0; i < desc_count; i++) {
 611			reg_fields = fields + 3 + i * 14;
 612			parse_hpx3_register(&hpx3, reg_fields);
 613			program_hpx_type3(dev, &hpx3);
 614		}
 615
 616		break;
 617	default:
 618		printk(KERN_WARNING
 619			"%s: Type 3 Revision %d record not supported\n",
 620			__func__, revision);
 621		return AE_ERROR;
 622	}
 623	return AE_OK;
 624}
 625
 626static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
 627{
 628	acpi_status status;
 629	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 630	union acpi_object *package, *record, *fields;
 631	struct hpx_type0 hpx0;
 632	struct hpx_type1 hpx1;
 633	struct hpx_type2 hpx2;
 634	u32 type;
 635	int i;
 636
 637	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
 638	if (ACPI_FAILURE(status))
 639		return status;
 640
 641	package = (union acpi_object *)buffer.pointer;
 642	if (package->type != ACPI_TYPE_PACKAGE) {
 643		status = AE_ERROR;
 644		goto exit;
 645	}
 646
 647	for (i = 0; i < package->package.count; i++) {
 648		record = &package->package.elements[i];
 649		if (record->type != ACPI_TYPE_PACKAGE) {
 650			status = AE_ERROR;
 651			goto exit;
 652		}
 653
 654		fields = record->package.elements;
 655		if (fields[0].type != ACPI_TYPE_INTEGER ||
 656		    fields[1].type != ACPI_TYPE_INTEGER) {
 657			status = AE_ERROR;
 658			goto exit;
 659		}
 660
 661		type = fields[0].integer.value;
 662		switch (type) {
 663		case 0:
 664			memset(&hpx0, 0, sizeof(hpx0));
 665			status = decode_type0_hpx_record(record, &hpx0);
 666			if (ACPI_FAILURE(status))
 667				goto exit;
 668			program_hpx_type0(dev, &hpx0);
 669			break;
 670		case 1:
 671			memset(&hpx1, 0, sizeof(hpx1));
 672			status = decode_type1_hpx_record(record, &hpx1);
 673			if (ACPI_FAILURE(status))
 674				goto exit;
 675			program_hpx_type1(dev, &hpx1);
 676			break;
 677		case 2:
 678			memset(&hpx2, 0, sizeof(hpx2));
 679			status = decode_type2_hpx_record(record, &hpx2);
 680			if (ACPI_FAILURE(status))
 681				goto exit;
 682			program_hpx_type2(dev, &hpx2);
 683			break;
 684		case 3:
 685			status = program_type3_hpx_record(dev, record);
 686			if (ACPI_FAILURE(status))
 687				goto exit;
 688			break;
 689		default:
 690			pr_err("%s: Type %d record not supported\n",
 691			       __func__, type);
 692			status = AE_ERROR;
 693			goto exit;
 694		}
 695	}
 696 exit:
 697	kfree(buffer.pointer);
 698	return status;
 699}
 700
 701static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
 702{
 703	acpi_status status;
 704	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 705	union acpi_object *package, *fields;
 706	struct hpx_type0 hpx0;
 707	int i;
 708
 709	memset(&hpx0, 0, sizeof(hpx0));
 710
 711	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
 712	if (ACPI_FAILURE(status))
 713		return status;
 714
 715	package = (union acpi_object *) buffer.pointer;
 716	if (package->type != ACPI_TYPE_PACKAGE ||
 717	    package->package.count != 4) {
 718		status = AE_ERROR;
 719		goto exit;
 720	}
 721
 722	fields = package->package.elements;
 723	for (i = 0; i < 4; i++) {
 724		if (fields[i].type != ACPI_TYPE_INTEGER) {
 725			status = AE_ERROR;
 726			goto exit;
 727		}
 728	}
 729
 730	hpx0.revision        = 1;
 731	hpx0.cache_line_size = fields[0].integer.value;
 732	hpx0.latency_timer   = fields[1].integer.value;
 733	hpx0.enable_serr     = fields[2].integer.value;
 734	hpx0.enable_perr     = fields[3].integer.value;
 735
 736	program_hpx_type0(dev, &hpx0);
 737
 738exit:
 739	kfree(buffer.pointer);
 740	return status;
 741}
 742
 743/* pci_acpi_program_hp_params
 744 *
 745 * @dev - the pci_dev for which we want parameters
 746 */
 747int pci_acpi_program_hp_params(struct pci_dev *dev)
 748{
 749	acpi_status status;
 750	acpi_handle handle, phandle;
 751	struct pci_bus *pbus;
 752
 753	if (acpi_pci_disabled)
 754		return -ENODEV;
 755
 756	handle = NULL;
 757	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
 758		handle = acpi_pci_get_bridge_handle(pbus);
 759		if (handle)
 760			break;
 761	}
 762
 763	/*
 764	 * _HPP settings apply to all child buses, until another _HPP is
 765	 * encountered. If we don't find an _HPP for the input pci dev,
 766	 * look for it in the parent device scope since that would apply to
 767	 * this pci dev.
 768	 */
 769	while (handle) {
 770		status = acpi_run_hpx(dev, handle);
 771		if (ACPI_SUCCESS(status))
 772			return 0;
 773		status = acpi_run_hpp(dev, handle);
 774		if (ACPI_SUCCESS(status))
 775			return 0;
 776		if (acpi_is_root_bridge(handle))
 777			break;
 778		status = acpi_get_parent(handle, &phandle);
 779		if (ACPI_FAILURE(status))
 780			break;
 781		handle = phandle;
 782	}
 783	return -ENODEV;
 784}
 785
 786/**
 787 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 788 * @bridge: Hotplug port to check
 789 *
 790 * Returns true if the given @bridge is handled by the native PCIe hotplug
 791 * driver.
 792 */
 793bool pciehp_is_native(struct pci_dev *bridge)
 794{
 795	const struct pci_host_bridge *host;
 796	u32 slot_cap;
 797
 798	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
 799		return false;
 800
 801	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
 802	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
 803		return false;
 804
 805	if (pcie_ports_native)
 806		return true;
 807
 808	host = pci_find_host_bridge(bridge->bus);
 809	return host->native_pcie_hotplug;
 810}
 811
 812/**
 813 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 814 * @bridge: Hotplug port to check
 815 *
 816 * Returns true if the given @bridge is handled by the native SHPC hotplug
 817 * driver.
 818 */
 819bool shpchp_is_native(struct pci_dev *bridge)
 820{
 821	return bridge->shpc_managed;
 822}
 823
 824/**
 825 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 826 * @context: Device wakeup context.
 827 */
 828static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
 829{
 830	struct acpi_device *adev;
 831	struct acpi_pci_root *root;
 832
 833	adev = container_of(context, struct acpi_device, wakeup.context);
 834	root = acpi_driver_data(adev);
 835	pci_pme_wakeup_bus(root->bus);
 836}
 837
 838/**
 839 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 840 * @context: Device wakeup context.
 841 */
 842static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
 843{
 844	struct pci_dev *pci_dev;
 845
 846	pci_dev = to_pci_dev(context->dev);
 847
 848	if (pci_dev->pme_poll)
 849		pci_dev->pme_poll = false;
 850
 851	if (pci_dev->current_state == PCI_D3cold) {
 852		pci_wakeup_event(pci_dev);
 853		pm_request_resume(&pci_dev->dev);
 854		return;
 855	}
 856
 857	/* Clear PME Status if set. */
 858	if (pci_dev->pme_support)
 859		pci_check_pme_status(pci_dev);
 860
 861	pci_wakeup_event(pci_dev);
 862	pm_request_resume(&pci_dev->dev);
 863
 864	pci_pme_wakeup_bus(pci_dev->subordinate);
 865}
 866
 867/**
 868 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 869 * @dev: PCI root bridge ACPI device.
 870 */
 871acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
 872{
 873	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
 874}
 875
 876/**
 877 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 878 * @dev: ACPI device to add the notifier for.
 879 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 880 */
 881acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
 882				     struct pci_dev *pci_dev)
 883{
 884	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
 885}
 886
 887/*
 888 * _SxD returns the D-state with the highest power
 889 * (lowest D-state number) supported in the S-state "x".
 890 *
 891 * If the device does not have a _PRW
 892 * (Power Resources for Wake) supporting system wakeup from "x"
 893 * then the OS is free to choose a lower power (higher number
 894 * D-state) than the return value from _SxD.
 895 *
 896 * But if _PRW is enabled at S-state "x", the OS
 897 * must not choose a power lower than _SxD --
 898 * unless the device has an _SxW method specifying
 899 * the lowest power (highest D-state number) the device
 900 * may enter while still able to wake the system.
 901 *
 902 * i.e. depending on global OS policy:
 903 *
 904 * if (_PRW at S-state x)
 905 *	choose from highest power _SxD to lowest power _SxW
 906 * else // no _PRW at S-state x
 907 *	choose highest power _SxD or any lower power
 908 */
 909
 910pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
 911{
 912	int acpi_state, d_max;
 913
 914	if (pdev->no_d3cold)
 915		d_max = ACPI_STATE_D3_HOT;
 916	else
 917		d_max = ACPI_STATE_D3_COLD;
 918	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
 919	if (acpi_state < 0)
 920		return PCI_POWER_ERROR;
 921
 922	switch (acpi_state) {
 923	case ACPI_STATE_D0:
 924		return PCI_D0;
 925	case ACPI_STATE_D1:
 926		return PCI_D1;
 927	case ACPI_STATE_D2:
 928		return PCI_D2;
 929	case ACPI_STATE_D3_HOT:
 930		return PCI_D3hot;
 931	case ACPI_STATE_D3_COLD:
 932		return PCI_D3cold;
 933	}
 934	return PCI_POWER_ERROR;
 935}
 936
 937static struct acpi_device *acpi_pci_find_companion(struct device *dev);
 938
 939void pci_set_acpi_fwnode(struct pci_dev *dev)
 940{
 941	if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
 942		ACPI_COMPANION_SET(&dev->dev,
 943				   acpi_pci_find_companion(&dev->dev));
 944}
 945
 946/**
 947 * pci_dev_acpi_reset - do a function level reset using _RST method
 948 * @dev: device to reset
 949 * @probe: if true, return 0 if device supports _RST
 950 */
 951int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
 952{
 953	acpi_handle handle = ACPI_HANDLE(&dev->dev);
 954
 955	if (!handle || !acpi_has_method(handle, "_RST"))
 956		return -ENOTTY;
 957
 958	if (probe)
 959		return 0;
 960
 961	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
 962		pci_warn(dev, "ACPI _RST failed\n");
 963		return -ENOTTY;
 964	}
 965
 966	return 0;
 967}
 968
 969bool acpi_pci_power_manageable(struct pci_dev *dev)
 970{
 971	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 972
 973	return adev && acpi_device_power_manageable(adev);
 974}
 975
 976bool acpi_pci_bridge_d3(struct pci_dev *dev)
 977{
 978	struct pci_dev *rpdev;
 979	struct acpi_device *adev;
 980	acpi_status status;
 981	unsigned long long state;
 982	const union acpi_object *obj;
 983
 984	if (acpi_pci_disabled || !dev->is_hotplug_bridge)
 985		return false;
 986
 987	/* Assume D3 support if the bridge is power-manageable by ACPI. */
 988	if (acpi_pci_power_manageable(dev))
 989		return true;
 990
 991	rpdev = pcie_find_root_port(dev);
 992	if (!rpdev)
 993		return false;
 994
 995	adev = ACPI_COMPANION(&rpdev->dev);
 996	if (!adev)
 997		return false;
 998
 999	/*
1000	 * If the Root Port cannot signal wakeup signals at all, i.e., it
1001	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
1002	 * events from low-power states including D3hot and D3cold.
1003	 */
1004	if (!adev->wakeup.flags.valid)
1005		return false;
1006
1007	/*
1008	 * If the Root Port cannot wake itself from D3hot or D3cold, we
1009	 * can't use D3.
1010	 */
1011	status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
1012	if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
1013		return false;
1014
1015	/*
1016	 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
1017	 * the Port can signal hotplug events while in D3.  We assume any
1018	 * bridges *below* that Root Port can also signal hotplug events
1019	 * while in D3.
1020	 */
1021	if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
1022				   ACPI_TYPE_INTEGER, &obj) &&
1023	    obj->integer.value == 1)
1024		return true;
1025
1026	return false;
1027}
1028
1029int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1030{
1031	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
1032	static const u8 state_conv[] = {
1033		[PCI_D0] = ACPI_STATE_D0,
1034		[PCI_D1] = ACPI_STATE_D1,
1035		[PCI_D2] = ACPI_STATE_D2,
1036		[PCI_D3hot] = ACPI_STATE_D3_HOT,
1037		[PCI_D3cold] = ACPI_STATE_D3_COLD,
1038	};
1039	int error = -EINVAL;
1040
1041	/* If the ACPI device has _EJ0, ignore the device */
1042	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
1043		return -ENODEV;
1044
1045	switch (state) {
1046	case PCI_D3cold:
1047		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
1048				PM_QOS_FLAGS_ALL) {
1049			error = -EBUSY;
1050			break;
1051		}
1052		fallthrough;
1053	case PCI_D0:
1054	case PCI_D1:
1055	case PCI_D2:
1056	case PCI_D3hot:
1057		error = acpi_device_set_power(adev, state_conv[state]);
1058	}
1059
1060	if (!error)
1061		pci_dbg(dev, "power state changed by ACPI to %s\n",
1062		        acpi_power_state_string(adev->power.state));
1063
1064	return error;
1065}
1066
1067pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
1068{
1069	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
1070	static const pci_power_t state_conv[] = {
1071		[ACPI_STATE_D0]      = PCI_D0,
1072		[ACPI_STATE_D1]      = PCI_D1,
1073		[ACPI_STATE_D2]      = PCI_D2,
1074		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
1075		[ACPI_STATE_D3_COLD] = PCI_D3cold,
1076	};
1077	int state;
1078
1079	if (!adev || !acpi_device_power_manageable(adev))
1080		return PCI_UNKNOWN;
1081
1082	state = adev->power.state;
1083	if (state == ACPI_STATE_UNKNOWN)
1084		return PCI_UNKNOWN;
1085
1086	return state_conv[state];
1087}
1088
1089void acpi_pci_refresh_power_state(struct pci_dev *dev)
1090{
1091	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
1092
1093	if (adev && acpi_device_power_manageable(adev))
1094		acpi_device_update_power(adev, NULL);
1095}
1096
1097static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
1098{
1099	while (bus->parent) {
1100		if (acpi_pm_device_can_wakeup(&bus->self->dev))
1101			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
1102
1103		bus = bus->parent;
1104	}
1105
1106	/* We have reached the root bus. */
1107	if (bus->bridge) {
1108		if (acpi_pm_device_can_wakeup(bus->bridge))
1109			return acpi_pm_set_device_wakeup(bus->bridge, enable);
1110	}
1111	return 0;
1112}
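
/*
 * Walk illustration (hypothetical topology): for a device on bus 02
 * behind the chain 00:1c.0 -> 01:00.0, the loop above offers the wakeup
 * request to 01:00.0 first, then to 00:1c.0; only if neither of them
 * supports ACPI wakeup does the request land on the host bridge via
 * bus->bridge.
 */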
1113
1114int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
1115{
1116	if (acpi_pci_disabled)
1117		return 0;
1118
1119	if (acpi_pm_device_can_wakeup(&dev->dev))
1120		return acpi_pm_set_device_wakeup(&dev->dev, enable);
1121
1122	return acpi_pci_propagate_wakeup(dev->bus, enable);
1123}
1124
1125bool acpi_pci_need_resume(struct pci_dev *dev)
1126{
1127	struct acpi_device *adev;
1128
1129	if (acpi_pci_disabled)
1130		return false;
1131
1132	/*
1133	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
1134	 * system-wide suspend/resume confuses the platform firmware, so avoid
1135	 * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
1136	 * devices are expected to be in D3 before invoking the S3 entry path
1137	 * from the firmware, so they should not be affected by this issue.
1138	 */
1139	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
1140		return true;
1141
1142	adev = ACPI_COMPANION(&dev->dev);
1143	if (!adev || !acpi_device_power_manageable(adev))
1144		return false;
1145
1146	if (adev->wakeup.flags.valid &&
1147	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
1148		return true;
1149
1150	if (acpi_target_system_state() == ACPI_STATE_S0)
1151		return false;
1152
1153	return !!adev->power.flags.dsw_present;
1154}
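
/*
 * Worked example of the checks above (hypothetical): a port with _DSW
 * (dsw_present set) that is suspended while the target sleep state is
 * S3 yields "true", so the PCI core resumes it before the transition;
 * with a target state of S0 (suspend-to-idle) and consistent wakeup
 * settings it yields "false" and may stay in low power.
 */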
1155
1156void acpi_pci_add_bus(struct pci_bus *bus)
1157{
1158	union acpi_object *obj;
1159	struct pci_host_bridge *bridge;
1160
1161	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
1162		return;
1163
1164	acpi_pci_slot_enumerate(bus);
1165	acpiphp_enumerate_slots(bus);
1166
1167	/*
1168	 * For a host bridge, check its _DSM for Function 8 (Reset Delay)
1169	 * and, if it is supported, record that in the pci_host_bridge.
1170	 */
1171	if (!pci_is_root_bus(bus))
1172		return;
1173
1174	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
1175				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
1176	if (!obj)
1177		return;
1178
1179	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
1180		bridge = pci_find_host_bridge(bus);
1181		bridge->ignore_reset_delay = 1;
1182	}
1183	ACPI_FREE(obj);
1184}
1185
1186void acpi_pci_remove_bus(struct pci_bus *bus)
1187{
1188	if (acpi_pci_disabled || !bus->bridge)
1189		return;
1190
1191	acpiphp_remove_slots(bus);
1192	acpi_pci_slot_remove(bus);
1193}
1194
1195/* ACPI bus type */
1196
1197
1198static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
1199static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);
1200
1201/**
1202 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
1203 * @func: ACPI companion lookup callback pointer or NULL.
1204 *
1205 * Set a special ACPI companion lookup callback for PCI devices whose companion
1206 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
1207 * encodings.
1208 *
1209 * Return 0 on success or a negative error code on failure (in which case no
1210 * changes are made).
1211 *
1212 * The caller is responsible for the appropriate ordering of the invocations of
1213 * this function with respect to the enumeration of the PCI devices needing the
1214 * callback installed by it.
1215 */
1216int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
1217{
1218	int ret;
1219
1220	if (!func)
1221		return -EINVAL;
1222
1223	down_write(&pci_acpi_companion_lookup_sem);
1224
1225	if (pci_acpi_find_companion_hook) {
1226		ret = -EBUSY;
1227	} else {
1228		pci_acpi_find_companion_hook = func;
1229		ret = 0;
1230	}
1231
1232	up_write(&pci_acpi_companion_lookup_sem);
1233
1234	return ret;
1235}
1236EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);
1237
1238/**
1239 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
1240 *
1241 * Clear the special ACPI companion lookup callback previously set by
1242 * pci_acpi_set_companion_lookup_hook().  Block until the last running instance
1243 * of the callback returns before clearing it.
1244 *
1245 * The caller is responsible for the appropriate ordering of the invocations of
1246 * this function with respect to the enumeration of the PCI devices needing the
1247 * callback cleared by it.
1248 */
1249void pci_acpi_clear_companion_lookup_hook(void)
1250{
1251	down_write(&pci_acpi_companion_lookup_sem);
1252
1253	pci_acpi_find_companion_hook = NULL;
1254
1255	up_write(&pci_acpi_companion_lookup_sem);
1256}
1257EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
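
/*
 * Usage sketch (hypothetical caller, names invented for illustration).
 * The hook resolves a non-standard _ADR encoding to an ACPI device:
 *
 *	static struct acpi_device *my_companion(struct pci_dev *pdev);
 *
 *	ret = pci_acpi_set_companion_lookup_hook(my_companion);
 *	if (ret)
 *		return ret;
 *
 *	... enumerate the PCI devices that need it ...
 *
 *	pci_acpi_clear_companion_lookup_hook();
 */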
1258
1259static struct acpi_device *acpi_pci_find_companion(struct device *dev)
1260{
1261	struct pci_dev *pci_dev = to_pci_dev(dev);
1262	struct acpi_device *adev;
1263	bool check_children;
1264	u64 addr;
1265
1266	if (!dev->parent)
1267		return NULL;
1268
1269	down_read(&pci_acpi_companion_lookup_sem);
1270
1271	adev = pci_acpi_find_companion_hook ?
1272		pci_acpi_find_companion_hook(pci_dev) : NULL;
1273
1274	up_read(&pci_acpi_companion_lookup_sem);
1275
1276	if (adev)
1277		return adev;
1278
1279	check_children = pci_is_bridge(pci_dev);
1280	/* Refer to the ACPI spec for the encoding of _ADR. */
1281	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
1282	adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
1283				      check_children);
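	/*
	 * Worked example (hypothetical device): for a function at
	 * 00:1f.3, devfn is 0xfb, so addr = (0x1f << 16) | 0x3 =
	 * 0x1f0003, which is compared against the _ADR values of the
	 * parent companion's children.
	 */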
1284
1285	/*
1286	 * There may be ACPI device objects in the ACPI namespace that are
1287	 * children of the device object representing the host bridge, but don't
1288	 * represent PCI devices.  Both _HID and _ADR may be present for them,
1289	 * even though that is against the specification (for example, see
1290	 * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
1291	 * appears to indicate that they should not be taken into consideration
1292	 * as potential companions of PCI devices on the root bus.
1293	 *
1294	 * To catch this special case, disregard the returned device object if
1295	 * it has a valid _HID, addr is 0 and the PCI device at hand is on the
1296	 * root bus.
1297	 */
1298	if (adev && adev->pnp.type.platform_id && !addr &&
1299	    pci_is_root_bus(pci_dev->bus))
1300		return NULL;
1301
1302	return adev;
1303}
1304
1305/**
1306 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
1307 * @pdev: the PCI device whose delay is to be updated
1308 * @handle: ACPI handle of this device
1309 *
1310 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
1311 * control method of either the device itself or the PCI host bridge.
1312 *
1313 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
1314 * host bridge.  If it returns one, the OS may assume that all devices in
1315 * the hierarchy have already completed power-on reset delays.
1316 *
1317 * Function 9, "Device Readiness Durations," applies only to the object
1318 * where it is located.  It returns delay durations required after various
1319 * events if the device requires less time than the spec requires.  Delays
1320 * from this function take precedence over the Reset Delay function.
1321 *
1322 * These _DSM functions are defined by the draft ECN of January 28, 2014,
1323 * titled "ACPI additions for FW latency optimizations."
1324 */
1325static void pci_acpi_optimize_delay(struct pci_dev *pdev,
1326				    acpi_handle handle)
1327{
1328	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
1329	int value;
1330	union acpi_object *obj, *elements;
1331
1332	if (bridge->ignore_reset_delay)
1333		pdev->d3cold_delay = 0;
1334
1335	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
1336				DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
1337	if (!obj)
1338		return;
1339
1340	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
1341		elements = obj->package.elements;
1342		if (elements[0].type == ACPI_TYPE_INTEGER) {
1343			value = (int)elements[0].integer.value / 1000;
1344			if (value < PCI_PM_D3COLD_WAIT)
1345				pdev->d3cold_delay = value;
1346		}
1347		if (elements[3].type == ACPI_TYPE_INTEGER) {
1348			value = (int)elements[3].integer.value / 1000;
1349			if (value < PCI_PM_D3HOT_WAIT)
1350				pdev->d3hot_delay = value;
1351		}
1352	}
1353	ACPI_FREE(obj);
1354}
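
/*
 * Worked example of the Function 9 decoding above (hypothetical
 * firmware values): a 5-element package whose first and fourth integers
 * are 10000 and 5000 microseconds yields 10 ms and 5 ms; both are below
 * the spec defaults (PCI_PM_D3COLD_WAIT and PCI_PM_D3HOT_WAIT), so
 * d3cold_delay becomes 10 and d3hot_delay becomes 5.
 */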
1355
1356static void pci_acpi_set_external_facing(struct pci_dev *dev)
1357{
1358	u8 val;
1359
1360	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1361		return;
1362	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
1363		return;
1364
1365	/*
1366	 * These root ports expose PCIe (including DMA) outside of the
1367	 * system.  Everything downstream from them is external.
1368	 */
1369	if (val)
1370		dev->external_facing = 1;
1371}
1372
1373void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
1374{
1375	struct pci_dev *pci_dev = to_pci_dev(dev);
1376
1377	pci_acpi_optimize_delay(pci_dev, adev->handle);
1378	pci_acpi_set_external_facing(pci_dev);
1379	pci_acpi_add_edr_notifier(pci_dev);
1380
1381	pci_acpi_add_pm_notifier(adev, pci_dev);
1382	if (!adev->wakeup.flags.valid)
1383		return;
1384
1385	device_set_wakeup_capable(dev, true);
1386	/*
1387	 * For bridges that support D3, enable wakeup automatically (as
1388	 * is done for their power management in that case), because the
1389	 * bridge may have additional methods, such as _DSW, that need
1390	 * to be called.
1391	 */
1392	if (pci_dev->bridge_d3)
1393		device_wakeup_enable(dev);
1394
1395	acpi_pci_wakeup(pci_dev, false);
1396	acpi_device_power_add_dependent(adev, dev);
1397
1398	if (pci_is_bridge(pci_dev))
1399		acpi_dev_power_up_children_with_adr(adev);
1400}
1401
1402void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
1403{
1404	struct pci_dev *pci_dev = to_pci_dev(dev);
1405
1406	pci_acpi_remove_edr_notifier(pci_dev);
1407	pci_acpi_remove_pm_notifier(adev);
1408	if (adev->wakeup.flags.valid) {
1409		acpi_device_power_remove_dependent(adev, dev);
1410		if (pci_dev->bridge_d3)
1411			device_wakeup_disable(dev);
1412
1413		device_set_wakeup_capable(dev, false);
1414	}
1415}
1416
1417static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);
1418
1419/**
1420 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
1421 * @fn:       Callback matching a device to a fwnode that identifies a PCI
1422 *            MSI domain.
1423 *
1424 * This should be called by the irqchip driver that is the parent of
1425 * the MSI domain, to provide a callback interface for querying fwnodes.
1426 */
1427void
1428pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
1429{
1430	pci_msi_get_fwnode_cb = fn;
1431}
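
/*
 * Registration sketch (hypothetical irqchip driver, names invented):
 *
 *	static struct fwnode_handle *my_msi_fwnode(struct device *dev)
 *	{
 *		return my_irqchip_fwnode;
 *	}
 *
 *	pci_msi_register_fwnode_provider(&my_msi_fwnode);
 */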
1432
1433/**
1434 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
1435 * @bus:      The PCI host bridge bus.
1436 *
1437 * This function uses the callback function registered by
1438 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
1439 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
1440 * This returns NULL on error or when the domain is not found.
1441 */
1442struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
1443{
1444	struct fwnode_handle *fwnode;
1445
1446	if (!pci_msi_get_fwnode_cb)
1447		return NULL;
1448
1449	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
1450	if (!fwnode)
1451		return NULL;
1452
1453	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
1454}
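
/*
 * Illustrative use (hypothetical, mirroring what host bridge setup
 * code could do with the result):
 *
 *	struct irq_domain *d = pci_host_bridge_acpi_msi_domain(bus);
 *
 *	if (d)
 *		dev_set_msi_domain(&bus->dev, d);
 */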
1455
1456static int __init acpi_pci_init(void)
1457{
1458	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
1459		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
1460		pci_no_msi();
1461	}
1462
1463	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
1464		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
1465		pcie_no_aspm();
1466	}
1467
1468	if (acpi_pci_disabled)
1469		return 0;
1470
1471	acpi_pci_slot_init();
1472	acpiphp_init();
1473
1474	return 0;
1475}
1476arch_initcall(acpi_pci_init);