v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCI Express PCI Hot Plug Driver
   4 *
   5 * Copyright (C) 1995,2001 Compaq Computer Corporation
   6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
   7 * Copyright (C) 2001 IBM Corp.
   8 * Copyright (C) 2003-2004 Intel Corporation
   9 *
  10 * All rights reserved.
  11 *
  12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
  13 */
  14
  15#define dev_fmt(fmt) "pciehp: " fmt
  16
  17#include <linux/bitfield.h>
  18#include <linux/dmi.h>
  19#include <linux/kernel.h>
  20#include <linux/types.h>
  21#include <linux/jiffies.h>
  22#include <linux/kthread.h>
  23#include <linux/pci.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/interrupt.h>
  26#include <linux/slab.h>
  27
  28#include "../pci.h"
  29#include "pciehp.h"
  30
  31static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
  32	/*
  33	 * Match all Dell systems, as some Dell systems have inband
  34	 * presence disabled on NVMe slots (but don't support the bit to
  35	 * report it). Setting inband presence disabled should have no
  36	 * negative effect, except on broken hotplug slots that never
  37	 * assert presence detect--and those will still work, they will
  38	 * just have a bit of extra delay before being probed.
  39	 */
  40	{
  41		.ident = "Dell System",
  42		.matches = {
  43			DMI_MATCH(DMI_OEM_STRING, "Dell System"),
  44		},
  45	},
  46	{}
  47};
  48
  49static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
  50{
  51	return ctrl->pcie->port;
  52}
  53
  54static irqreturn_t pciehp_isr(int irq, void *dev_id);
  55static irqreturn_t pciehp_ist(int irq, void *dev_id);
  56static int pciehp_poll(void *data);
  57
  58static inline int pciehp_request_irq(struct controller *ctrl)
  59{
  60	int retval, irq = ctrl->pcie->irq;
  61
  62	if (pciehp_poll_mode) {
  63		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
  64						"pciehp_poll-%s",
  65						slot_name(ctrl));
  66		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
  67	}
  68
  69	/* Installs the interrupt handler */
  70	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
  71				      IRQF_SHARED, "pciehp", ctrl);
  72	if (retval)
  73		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
  74			 irq);
  75	return retval;
  76}
  77
  78static inline void pciehp_free_irq(struct controller *ctrl)
  79{
  80	if (pciehp_poll_mode)
  81		kthread_stop(ctrl->poll_thread);
  82	else
  83		free_irq(ctrl->pcie->irq, ctrl);
  84}
  85
  86static int pcie_poll_cmd(struct controller *ctrl, int timeout)
  87{
  88	struct pci_dev *pdev = ctrl_dev(ctrl);
  89	u16 slot_status;
  90
  91	do {
  92		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  93		if (PCI_POSSIBLE_ERROR(slot_status)) {
  94			ctrl_info(ctrl, "%s: no response from device\n",
  95				  __func__);
  96			return 0;
  97		}
  98
  99		if (slot_status & PCI_EXP_SLTSTA_CC) {
 100			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 101						   PCI_EXP_SLTSTA_CC);
 102			ctrl->cmd_busy = 0;
 103			smp_mb();
 104			return 1;
 105		}
 106		msleep(10);
 107		timeout -= 10;
 108	} while (timeout >= 0);
 109	return 0;	/* timeout */
 110}
 111
 112static void pcie_wait_cmd(struct controller *ctrl)
 113{
 114	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 115	unsigned long duration = msecs_to_jiffies(msecs);
 116	unsigned long cmd_timeout = ctrl->cmd_started + duration;
 117	unsigned long now, timeout;
 118	int rc;
 119
 120	/*
 121	 * If the controller does not generate notifications for command
 122	 * completions, we never need to wait between writes.
 123	 */
 124	if (NO_CMD_CMPL(ctrl))
 125		return;
 126
 127	if (!ctrl->cmd_busy)
 128		return;
 129
 130	/*
 131	 * Even if the command has already timed out, we want to call
 132	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
 133	 */
 134	now = jiffies;
 135	if (time_before_eq(cmd_timeout, now))
 136		timeout = 1;
 137	else
 138		timeout = cmd_timeout - now;
 139
 140	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
 141	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
 142		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
 143	else
 144		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 145
 146	if (!rc)
 147		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
 148			  ctrl->slot_ctrl,
 149			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
 150}
 151
 152#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
 153				 PCI_EXP_SLTCTL_PIC |	\
 154				 PCI_EXP_SLTCTL_AIC |	\
 155				 PCI_EXP_SLTCTL_EIC)
 156
 157static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 158			      u16 mask, bool wait)
 159{
 160	struct pci_dev *pdev = ctrl_dev(ctrl);
 161	u16 slot_ctrl_orig, slot_ctrl;
 162
 163	mutex_lock(&ctrl->ctrl_lock);
 164
 165	/*
 166	 * Always wait for any previous command that might still be in progress
 167	 */
 168	pcie_wait_cmd(ctrl);
 169
 170	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 171	if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
 172		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 173		goto out;
 174	}
 175
 176	slot_ctrl_orig = slot_ctrl;
 177	slot_ctrl &= ~mask;
 178	slot_ctrl |= (cmd & mask);
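     	/*
     	 * Note: cmd_busy is set before the Slot Control write below; the
     	 * Command Completed paths (pciehp_isr(), which also wakes
     	 * ctrl->queue, and pcie_poll_cmd()) clear it once the controller
     	 * has latched the command.
     	 */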
 179	ctrl->cmd_busy = 1;
 180	smp_mb();
 181	ctrl->slot_ctrl = slot_ctrl;
 182	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
 183	ctrl->cmd_started = jiffies;
 184
 185	/*
 186	 * Controllers with the Intel CF118 and similar errata advertise
 187	 * Command Completed support, but they only set Command Completed
 188	 * if we change the "Control" bits for power, power indicator,
 189	 * attention indicator, or interlock.  If we only change the
 190	 * "Enable" bits, they never set the Command Completed bit.
 191	 */
 192	if (pdev->broken_cmd_compl &&
 193	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
 194		ctrl->cmd_busy = 0;
 195
 196	/*
 197	 * Optionally wait for the hardware to be ready for a new command,
 198	 * indicating completion of the above issued command.
 199	 */
 200	if (wait)
 201		pcie_wait_cmd(ctrl);
 202
 203out:
 204	mutex_unlock(&ctrl->ctrl_lock);
 205}
 206
 207/**
 208 * pcie_write_cmd - Issue controller command
 209 * @ctrl: controller to which the command is issued
 210 * @cmd:  command value written to slot control register
 211 * @mask: bitmask of slot control register to be modified
 212 */
 213static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 214{
 215	pcie_do_write_cmd(ctrl, cmd, mask, true);
 216}
 217
 218/* Same as above without waiting for the hardware to latch */
 219static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
 220{
 221	pcie_do_write_cmd(ctrl, cmd, mask, false);
 222}
 223
 224/**
 225 * pciehp_check_link_active() - Is the link active
 226 * @ctrl: PCIe hotplug controller
 227 *
 228 * Check whether the downstream link is currently active. Note it is
 229 * possible that the card is removed immediately after this so the
 230 * caller may need to take it into account.
 231 *
 232 * If the hotplug controller itself is not available anymore returns
 233 * %-ENODEV.
 234 */
 235int pciehp_check_link_active(struct controller *ctrl)
 236{
 237	struct pci_dev *pdev = ctrl_dev(ctrl);
 238	u16 lnk_status;
 239	int ret;
 240
 241	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 242	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
 243		return -ENODEV;
 244
 245	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
 246	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 247
 248	return ret;
 249}
 250
 251static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
 252{
 253	u32 l;
 254	int count = 0;
 255	int delay = 1000, step = 20;
 256	bool found = false;
 257
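     	/*
     	 * Poll the Vendor ID for up to 1 second (in 20 ms steps) to give
     	 * the device time to respond to config accesses after link-up.
     	 */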
 258	do {
 259		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
 260		count++;
 261
 262		if (found)
 263			break;
 264
 265		msleep(step);
 266		delay -= step;
 267	} while (delay > 0);
 268
 269	if (count > 1)
 270		pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
 271			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
 272			PCI_FUNC(devfn), count, step, l);
 273
 274	return found;
 275}
 276
 277static void pcie_wait_for_presence(struct pci_dev *pdev)
 278{
 279	int timeout = 1250;
 280	u16 slot_status;
 281
 282	do {
 283		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 284		if (slot_status & PCI_EXP_SLTSTA_PDS)
 285			return;
 286		msleep(10);
 287		timeout -= 10;
 288	} while (timeout > 0);
 289}
 290
 291int pciehp_check_link_status(struct controller *ctrl)
 292{
 293	struct pci_dev *pdev = ctrl_dev(ctrl);
 294	bool found;
 295	u16 lnk_status;
 296
 297	if (!pcie_wait_for_link(pdev, true)) {
 298		ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
 299		return -1;
 300	}
 301
 302	if (ctrl->inband_presence_disabled)
 303		pcie_wait_for_presence(pdev);
 304
 305	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
 306					PCI_DEVFN(0, 0));
 307
 308	/* ignore link or presence changes up to this point */
 309	if (found)
 310		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
 311			   &ctrl->pending_events);
 312
 313	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 314	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 315	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
 316	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
 317		ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
 318			  slot_name(ctrl), lnk_status);
 319		return -1;
 320	}
 321
 322	__pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
 323
 324	if (!found) {
 325		ctrl_info(ctrl, "Slot(%s): No device found\n",
 326			  slot_name(ctrl));
 327		return -1;
 328	}
 329
 330	return 0;
 331}
 332
 333static int __pciehp_link_set(struct controller *ctrl, bool enable)
 334{
 335	struct pci_dev *pdev = ctrl_dev(ctrl);
 336
 337	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 338					   PCI_EXP_LNKCTL_LD,
 339					   enable ? 0 : PCI_EXP_LNKCTL_LD);
 340
 341	return 0;
 342}
 343
 344static int pciehp_link_enable(struct controller *ctrl)
 345{
 346	return __pciehp_link_set(ctrl, true);
 347}
 348
 349int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 350				    u8 *status)
 351{
 352	struct controller *ctrl = to_ctrl(hotplug_slot);
 353	struct pci_dev *pdev = ctrl_dev(ctrl);
 354	u16 slot_ctrl;
 355
 356	pci_config_pm_runtime_get(pdev);
 357	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 358	pci_config_pm_runtime_put(pdev);
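     	/*
     	 * PCI_EXP_SLTCTL_AIC occupies Slot Control bits 7:6 and
     	 * PCI_EXP_SLTCTL_PIC bits 9:8, so the shift below packs both
     	 * two-bit indicator fields into the low nibble of *status.
     	 */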
 359	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
 360	return 0;
 361}
 362
 363int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
 364{
 365	struct controller *ctrl = to_ctrl(hotplug_slot);
 366	struct pci_dev *pdev = ctrl_dev(ctrl);
 367	u16 slot_ctrl;
 368
 369	pci_config_pm_runtime_get(pdev);
 370	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 371	pci_config_pm_runtime_put(pdev);
 372	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
 373		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 374
 375	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
 376	case PCI_EXP_SLTCTL_ATTN_IND_ON:
 377		*status = 1;	/* On */
 378		break;
 379	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
 380		*status = 2;	/* Blink */
 381		break;
 382	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
 383		*status = 0;	/* Off */
 384		break;
 385	default:
 386		*status = 0xFF;
 387		break;
 388	}
 389
 390	return 0;
 391}
 392
 393void pciehp_get_power_status(struct controller *ctrl, u8 *status)
 394{
 395	struct pci_dev *pdev = ctrl_dev(ctrl);
 396	u16 slot_ctrl;
 397
 398	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 399	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
 400		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 401
 402	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
 403	case PCI_EXP_SLTCTL_PWR_ON:
 404		*status = 1;	/* On */
 405		break;
 406	case PCI_EXP_SLTCTL_PWR_OFF:
 407		*status = 0;	/* Off */
 408		break;
 409	default:
 410		*status = 0xFF;
 411		break;
 412	}
 413}
 414
 415void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
 416{
 417	struct pci_dev *pdev = ctrl_dev(ctrl);
 418	u16 slot_status;
 419
 420	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 421	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 422}
 423
 424/**
 425 * pciehp_card_present() - Is the card present
 426 * @ctrl: PCIe hotplug controller
 427 *
 428 * Function checks whether the card is currently present in the slot and
 429 * in that case returns true. Note it is possible that the card is
 430 * removed immediately after the check so the caller may need to take
 431 * this into account.
 432 *
  433 * If the hotplug controller itself is not available anymore returns
 434 * %-ENODEV.
 435 */
 436int pciehp_card_present(struct controller *ctrl)
 437{
 438	struct pci_dev *pdev = ctrl_dev(ctrl);
 439	u16 slot_status;
 440	int ret;
 441
 442	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 443	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
 444		return -ENODEV;
 445
 446	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
 447}
 448
 449/**
 450 * pciehp_card_present_or_link_active() - whether given slot is occupied
 451 * @ctrl: PCIe hotplug controller
 452 *
 453 * Unlike pciehp_card_present(), which determines presence solely from the
 454 * Presence Detect State bit, this helper also returns true if the Link Active
 455 * bit is set.  This is a concession to broken hotplug ports which hardwire
 456 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
 457 *
 458 * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
 459 *	    port is not present anymore returns %-ENODEV.
 460 */
 461int pciehp_card_present_or_link_active(struct controller *ctrl)
 462{
 463	int ret;
 464
 465	ret = pciehp_card_present(ctrl);
 466	if (ret)
 467		return ret;
 468
 469	return pciehp_check_link_active(ctrl);
 470}
 471
 472int pciehp_query_power_fault(struct controller *ctrl)
 473{
 474	struct pci_dev *pdev = ctrl_dev(ctrl);
 475	u16 slot_status;
 476
 477	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 478	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
 479}
 480
 481int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 482				    u8 status)
 483{
 484	struct controller *ctrl = to_ctrl(hotplug_slot);
 485	struct pci_dev *pdev = ctrl_dev(ctrl);
 486
 487	pci_config_pm_runtime_get(pdev);
 488
 489	/* Attention and Power Indicator Control bits are supported */
 490	pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
 491			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
 492	pci_config_pm_runtime_put(pdev);
 493	return 0;
 494}
 495
 496/**
 497 * pciehp_set_indicators() - set attention indicator, power indicator, or both
 498 * @ctrl: PCIe hotplug controller
 499 * @pwr: one of:
 500 *	PCI_EXP_SLTCTL_PWR_IND_ON
 501 *	PCI_EXP_SLTCTL_PWR_IND_BLINK
 502 *	PCI_EXP_SLTCTL_PWR_IND_OFF
 503 * @attn: one of:
 504 *	PCI_EXP_SLTCTL_ATTN_IND_ON
 505 *	PCI_EXP_SLTCTL_ATTN_IND_BLINK
 506 *	PCI_EXP_SLTCTL_ATTN_IND_OFF
 507 *
 508 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
 509 * unchanged.
 510 */
 511void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
 512{
 513	u16 cmd = 0, mask = 0;
 514
 515	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
 516		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
 517		mask |= PCI_EXP_SLTCTL_PIC;
 518	}
 519
 520	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
 521		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
 522		mask |= PCI_EXP_SLTCTL_AIC;
 523	}
 524
 525	if (cmd) {
 526		pcie_write_cmd_nowait(ctrl, cmd, mask);
 527		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 528			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 529	}
 530}
 531
 532int pciehp_power_on_slot(struct controller *ctrl)
 533{
 534	struct pci_dev *pdev = ctrl_dev(ctrl);
 535	u16 slot_status;
 536	int retval;
 537
 538	/* Clear power-fault bit from previous power failures */
 539	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 540	if (slot_status & PCI_EXP_SLTSTA_PFD)
 541		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 542					   PCI_EXP_SLTSTA_PFD);
 543	ctrl->power_fault_detected = 0;
 544
 545	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
 546	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 547		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 548		 PCI_EXP_SLTCTL_PWR_ON);
 549
 550	retval = pciehp_link_enable(ctrl);
 551	if (retval)
 552		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
 553
 554	return retval;
 555}
 556
 557void pciehp_power_off_slot(struct controller *ctrl)
 558{
 559	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
 560	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 561		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 562		 PCI_EXP_SLTCTL_PWR_OFF);
 563}
 564
 565static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
 566					  struct pci_dev *pdev, int irq)
 567{
 568	/*
 569	 * Ignore link changes which occurred while waiting for DPC recovery.
 570	 * Could be several if DPC triggered multiple times consecutively.
 571	 */
 572	synchronize_hardirq(irq);
 573	atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
 574	if (pciehp_poll_mode)
 575		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 576					   PCI_EXP_SLTSTA_DLLSC);
 577	ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
 578		  slot_name(ctrl));
 579
 580	/*
 581	 * If the link is unexpectedly down after successful recovery,
 582	 * the corresponding link change may have been ignored above.
 583	 * Synthesize it to ensure that it is acted on.
 584	 */
 585	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 586	if (!pciehp_check_link_active(ctrl))
 587		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 588	up_read(&ctrl->reset_lock);
 589}
 590
 591static irqreturn_t pciehp_isr(int irq, void *dev_id)
 592{
 593	struct controller *ctrl = (struct controller *)dev_id;
 594	struct pci_dev *pdev = ctrl_dev(ctrl);
 595	struct device *parent = pdev->dev.parent;
 596	u16 status, events = 0;
 597
 598	/*
 599	 * Interrupts only occur in D3hot or shallower and only if enabled
 600	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
 601	 */
 602	if (pdev->current_state == PCI_D3cold ||
 603	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
 604		return IRQ_NONE;
 605
 606	/*
 607	 * Keep the port accessible by holding a runtime PM ref on its parent.
 608	 * Defer resume of the parent to the IRQ thread if it's suspended.
 609	 * Mask the interrupt until then.
 610	 */
 611	if (parent) {
 612		pm_runtime_get_noresume(parent);
 613		if (!pm_runtime_active(parent)) {
 614			pm_runtime_put(parent);
 615			disable_irq_nosync(irq);
 616			atomic_or(RERUN_ISR, &ctrl->pending_events);
 617			return IRQ_WAKE_THREAD;
 618		}
 619	}
 620
 621read_status:
 622	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
 623	if (PCI_POSSIBLE_ERROR(status)) {
 624		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 625		if (parent)
 626			pm_runtime_put(parent);
 627		return IRQ_NONE;
 628	}
 629
 630	/*
 631	 * Slot Status contains plain status bits as well as event
 632	 * notification bits; right now we only want the event bits.
 633	 */
 634	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 635		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
 636		  PCI_EXP_SLTSTA_DLLSC;
 637
 638	/*
 639	 * If we've already reported a power fault, don't report it again
 640	 * until we've done something to handle it.
 641	 */
 642	if (ctrl->power_fault_detected)
 643		status &= ~PCI_EXP_SLTSTA_PFD;
 644	else if (status & PCI_EXP_SLTSTA_PFD)
 645		ctrl->power_fault_detected = true;
 646
 647	events |= status;
 648	if (!events) {
 649		if (parent)
 650			pm_runtime_put(parent);
 651		return IRQ_NONE;
 652	}
 653
 654	if (status) {
 655		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
 656
 657		/*
 658		 * In MSI mode, all event bits must be zero before the port
 659		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
 660		 * So re-read the Slot Status register in case a bit was set
 661		 * between read and write.
 662		 */
 663		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
 664			goto read_status;
 665	}
 666
 667	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
 668	if (parent)
 669		pm_runtime_put(parent);
 670
 671	/*
 672	 * Command Completed notifications are not deferred to the
 673	 * IRQ thread because it may be waiting for their arrival.
 674	 */
 675	if (events & PCI_EXP_SLTSTA_CC) {
 676		ctrl->cmd_busy = 0;
 677		smp_mb();
 678		wake_up(&ctrl->queue);
 679
 680		if (events == PCI_EXP_SLTSTA_CC)
 681			return IRQ_HANDLED;
 682
 683		events &= ~PCI_EXP_SLTSTA_CC;
 684	}
 685
 686	if (pdev->ignore_hotplug) {
 687		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
 688		return IRQ_HANDLED;
 689	}
 690
 691	/* Save pending events for consumption by IRQ thread. */
 692	atomic_or(events, &ctrl->pending_events);
 693	return IRQ_WAKE_THREAD;
 694}
 695
 696static irqreturn_t pciehp_ist(int irq, void *dev_id)
 697{
 698	struct controller *ctrl = (struct controller *)dev_id;
 699	struct pci_dev *pdev = ctrl_dev(ctrl);
 700	irqreturn_t ret;
 701	u32 events;
 702
 703	ctrl->ist_running = true;
 704	pci_config_pm_runtime_get(pdev);
 705
 706	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
 707	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
 708		ret = pciehp_isr(irq, dev_id);
 709		enable_irq(irq);
 710		if (ret != IRQ_WAKE_THREAD)
 711			goto out;
 712	}
 713
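     	/*
     	 * Wait for a hard IRQ handler that may still be running to finish
     	 * updating pending_events before the events are consumed below.
     	 */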
 714	synchronize_hardirq(irq);
 715	events = atomic_xchg(&ctrl->pending_events, 0);
 716	if (!events) {
 717		ret = IRQ_NONE;
 718		goto out;
 719	}
 720
 721	/* Check Attention Button Pressed */
 722	if (events & PCI_EXP_SLTSTA_ABP)
 723		pciehp_handle_button_press(ctrl);
 724
 725	/* Check Power Fault Detected */
 726	if (events & PCI_EXP_SLTSTA_PFD) {
 727		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
 728		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
 729				      PCI_EXP_SLTCTL_ATTN_IND_ON);
 730	}
 731
 732	/*
 733	 * Ignore Link Down/Up events caused by Downstream Port Containment
 734	 * if recovery from the error succeeded.
 735	 */
 736	if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
 737	    ctrl->state == ON_STATE) {
 738		events &= ~PCI_EXP_SLTSTA_DLLSC;
 739		pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
 740	}
 741
 742	/*
 743	 * Disable requests have higher priority than Presence Detect Changed
 744	 * or Data Link Layer State Changed events.
 745	 */
 746	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 747	if (events & DISABLE_SLOT)
 748		pciehp_handle_disable_request(ctrl);
 749	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
 750		pciehp_handle_presence_or_link_change(ctrl, events);
 751	up_read(&ctrl->reset_lock);
 752
 753	ret = IRQ_HANDLED;
 754out:
 755	pci_config_pm_runtime_put(pdev);
 756	ctrl->ist_running = false;
 757	wake_up(&ctrl->requester);
 758	return ret;
 759}
 760
 761static int pciehp_poll(void *data)
 762{
 763	struct controller *ctrl = data;
 764
 765	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
 766
 767	while (!kthread_should_stop()) {
 768		/* poll for interrupt events or user requests */
 769		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
 770		       atomic_read(&ctrl->pending_events))
 771			pciehp_ist(IRQ_NOTCONNECTED, ctrl);
 772
 773		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
 774			pciehp_poll_time = 2; /* clamp to sane value */
 775
 776		schedule_timeout_idle(pciehp_poll_time * HZ);
 777	}
 778
 779	return 0;
 780}
 781
 782static void pcie_enable_notification(struct controller *ctrl)
 783{
 784	u16 cmd, mask;
 785
 786	/*
 787	 * TBD: Power fault detected software notification support.
 788	 *
 789	 * Power fault detected software notification is not enabled
 790	 * now, because it caused power fault detected interrupt storm
 791	 * on some machines. On those machines, power fault detected
 792	 * bit in the slot status register was set again immediately
  793	 * when it was cleared in the interrupt service routine, and the
  794	 * next power fault detected interrupt was notified again.
 795	 */
 796
 797	/*
 798	 * Always enable link events: thus link-up and link-down shall
 799	 * always be treated as hotplug and unplug respectively. Enable
 800	 * presence detect only if Attention Button is not present.
 801	 */
 802	cmd = PCI_EXP_SLTCTL_DLLSCE;
 803	if (ATTN_BUTTN(ctrl))
 804		cmd |= PCI_EXP_SLTCTL_ABPE;
 805	else
 806		cmd |= PCI_EXP_SLTCTL_PDCE;
 807	if (!pciehp_poll_mode)
 808		cmd |= PCI_EXP_SLTCTL_HPIE;
 809	if (!pciehp_poll_mode && !NO_CMD_CMPL(ctrl))
 810		cmd |= PCI_EXP_SLTCTL_CCIE;
 811
 812	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 813		PCI_EXP_SLTCTL_PFDE |
 814		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 815		PCI_EXP_SLTCTL_DLLSCE);
 816
 817	pcie_write_cmd_nowait(ctrl, cmd, mask);
 818	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 819		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 820}
 821
 822static void pcie_disable_notification(struct controller *ctrl)
 823{
 824	u16 mask;
 825
 826	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 827		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
 828		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 829		PCI_EXP_SLTCTL_DLLSCE);
 830	pcie_write_cmd(ctrl, 0, mask);
 831	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 832		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 833}
 834
 835void pcie_clear_hotplug_events(struct controller *ctrl)
 836{
 837	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
 838				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
 839}
 840
 841void pcie_enable_interrupt(struct controller *ctrl)
 842{
 843	u16 mask;
 844
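     	/*
     	 * Re-enable the two bits masked off in pcie_disable_interrupt()
     	 * below: the hot-plug interrupt itself and Data Link Layer State
     	 * Changed notification.
     	 */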
 845	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 846	pcie_write_cmd(ctrl, mask, mask);
 847}
 848
 849void pcie_disable_interrupt(struct controller *ctrl)
 850{
 851	u16 mask;
 852
 853	/*
 854	 * Mask hot-plug interrupt to prevent it triggering immediately
 855	 * when the link goes inactive (we still get PME when any of the
 856	 * enabled events is detected). Same goes with Link Layer State
 857	 * changed event which generates PME immediately when the link goes
 858	 * inactive so mask it as well.
 859	 */
 860	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 861	pcie_write_cmd(ctrl, 0, mask);
 862}
 863
 864/**
 865 * pciehp_slot_reset() - ignore link event caused by error-induced hot reset
 866 * @dev: PCI Express port service device
 867 *
 868 * Called from pcie_portdrv_slot_reset() after AER or DPC initiated a reset
 869 * further up in the hierarchy to recover from an error.  The reset was
 870 * propagated down to this hotplug port.  Ignore the resulting link flap.
 871 * If the link failed to retrain successfully, synthesize the ignored event.
 872 * Surprise removal during reset is detected through Presence Detect Changed.
 873 */
 874int pciehp_slot_reset(struct pcie_device *dev)
 875{
 876	struct controller *ctrl = get_service_data(dev);
 877
 878	if (ctrl->state != ON_STATE)
 879		return 0;
 880
 881	pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA,
 882				   PCI_EXP_SLTSTA_DLLSC);
 883
 884	if (!pciehp_check_link_active(ctrl))
 885		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 886
 887	return 0;
 888}
 889
 890/*
 891 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 892 * bus reset of the bridge, but at the same time we want to ensure that it is
 893 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 894 * disable link state notification and presence detection change notification
 895 * momentarily, if we see that they could interfere. Also, clear any spurious
 896 * events after.
 897 */
 898int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
 899{
 900	struct controller *ctrl = to_ctrl(hotplug_slot);
 901	struct pci_dev *pdev = ctrl_dev(ctrl);
 902	u16 stat_mask = 0, ctrl_mask = 0;
 903	int rc;
 904
 905	if (probe)
 906		return 0;
 907
 908	down_write_nested(&ctrl->reset_lock, ctrl->depth);
 909
 910	if (!ATTN_BUTTN(ctrl)) {
 911		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
 912		stat_mask |= PCI_EXP_SLTSTA_PDC;
 913	}
 914	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
 915	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
 916
 917	pcie_write_cmd(ctrl, 0, ctrl_mask);
 918	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 919		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 920
 921	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
 922
 923	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
 924	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
 925	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 926		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
 927
 928	up_write(&ctrl->reset_lock);
 929	return rc;
 930}
 931
 932int pcie_init_notification(struct controller *ctrl)
 933{
 934	if (pciehp_request_irq(ctrl))
 935		return -1;
 936	pcie_enable_notification(ctrl);
 937	ctrl->notification_enabled = 1;
 938	return 0;
 939}
 940
 941void pcie_shutdown_notification(struct controller *ctrl)
 942{
 943	if (ctrl->notification_enabled) {
 944		pcie_disable_notification(ctrl);
 945		pciehp_free_irq(ctrl);
 946		ctrl->notification_enabled = 0;
 947	}
 948}
 949
 950static inline void dbg_ctrl(struct controller *ctrl)
 951{
 952	struct pci_dev *pdev = ctrl->pcie->port;
 953	u16 reg16;
 954
 955	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
 956	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
 957	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
 958	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
 959	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
 960}
 961
 962#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
 963
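     /*
      * Number of hotplug bridges above this port in the hierarchy.  Used as
      * the lockdep nesting level for ctrl->reset_lock (see the
      * down_read_nested() and down_write_nested() callers), so that nested
      * hotplug ports can take their reset locks without false lockdep reports.
      */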
 964static inline int pcie_hotplug_depth(struct pci_dev *dev)
 965{
 966	struct pci_bus *bus = dev->bus;
 967	int depth = 0;
 968
 969	while (bus->parent) {
 970		bus = bus->parent;
 971		if (bus->self && bus->self->is_hotplug_bridge)
 972			depth++;
 973	}
 974
 975	return depth;
 976}
 977
 978struct controller *pcie_init(struct pcie_device *dev)
 979{
 980	struct controller *ctrl;
 981	u32 slot_cap, slot_cap2;
 982	u8 poweron;
 983	struct pci_dev *pdev = dev->port;
 984	struct pci_bus *subordinate = pdev->subordinate;
 985
 986	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 987	if (!ctrl)
 988		return NULL;
 989
 990	ctrl->pcie = dev;
 991	ctrl->depth = pcie_hotplug_depth(dev->port);
 992	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
 993
 994	if (pdev->hotplug_user_indicators)
 995		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
 996
 997	/*
 998	 * We assume no Thunderbolt controllers support Command Complete events,
 999	 * but some controllers falsely claim they do.
1000	 */
1001	if (pdev->is_thunderbolt)
1002		slot_cap |= PCI_EXP_SLTCAP_NCCS;
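     	/*
     	 * Setting NCCS here makes NO_CMD_CMPL() true for such ports, so
     	 * pcie_wait_cmd() never waits for a Command Completed notification
     	 * from them.
     	 */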
1003
1004	ctrl->slot_cap = slot_cap;
1005	mutex_init(&ctrl->ctrl_lock);
1006	mutex_init(&ctrl->state_lock);
1007	init_rwsem(&ctrl->reset_lock);
1008	init_waitqueue_head(&ctrl->requester);
1009	init_waitqueue_head(&ctrl->queue);
1010	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
1011	dbg_ctrl(ctrl);
1012
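     	/*
     	 * Treat the slot as occupied if the port already has child devices,
     	 * e.g. when they were enumerated at boot.
     	 */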
1013	down_read(&pci_bus_sem);
1014	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
1015	up_read(&pci_bus_sem);
1016
1017	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
1018	if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
1019		pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
1020				      PCI_EXP_SLTCTL_IBPD_DISABLE);
1021		ctrl->inband_presence_disabled = 1;
1022	}
1023
1024	if (dmi_first_match(inband_presence_disabled_dmi_table))
1025		ctrl->inband_presence_disabled = 1;
1026
1027	/* Clear all remaining event bits in Slot Status register. */
1028	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
1029		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
1030		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
1031		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
1032
1033	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
1034		FIELD_GET(PCI_EXP_SLTCAP_PSN, slot_cap),
1035		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
1036		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
1037		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
1038		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
1039		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
1040		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
1041		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
1042		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
1043		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
1044		FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
1045		FLAG(pdev->link_active_reporting, true),
1046		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
1047
1048	/*
1049	 * If empty slot's power status is on, turn power off.  The IRQ isn't
1050	 * requested yet, so avoid triggering a notification with this command.
1051	 */
1052	if (POWER_CTRL(ctrl)) {
1053		pciehp_get_power_status(ctrl, &poweron);
1054		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
1055			pcie_disable_notification(ctrl);
1056			pciehp_power_off_slot(ctrl);
1057		}
1058	}
1059
1060	pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
1061	if (pdev)
1062		ctrl->dsn = pci_get_dsn(pdev);
1063	pci_dev_put(pdev);
1064
1065	return ctrl;
1066}
1067
1068void pciehp_release_ctrl(struct controller *ctrl)
1069{
1070	cancel_delayed_work_sync(&ctrl->button_work);
1071	kfree(ctrl);
1072}
1073
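     /*
      * Hotplug ports with this quirk advertise Command Completed support
      * (NCCS clear in Slot Capabilities) but never set the Command Completed
      * bit when only "Enable" bits are written.  broken_cmd_compl lets
      * pcie_do_write_cmd() skip the completion wait in that case (see
      * CC_ERRATUM_MASK above).
      */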
1074static void quirk_cmd_compl(struct pci_dev *pdev)
1075{
1076	u32 slot_cap;
1077
1078	if (pci_is_pcie(pdev)) {
1079		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
1080		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
1081		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
1082			pdev->broken_cmd_compl = 1;
1083	}
1084}
1085DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1086			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1087DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
1088			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1089DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
1090			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1091DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
1092			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1093DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
1094			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1095DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
1096			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
v6.8
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCI Express PCI Hot Plug Driver
   4 *
   5 * Copyright (C) 1995,2001 Compaq Computer Corporation
   6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
   7 * Copyright (C) 2001 IBM Corp.
   8 * Copyright (C) 2003-2004 Intel Corporation
   9 *
  10 * All rights reserved.
  11 *
  12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
  13 */
  14
  15#define dev_fmt(fmt) "pciehp: " fmt
  16
  17#include <linux/bitfield.h>
  18#include <linux/dmi.h>
  19#include <linux/kernel.h>
  20#include <linux/types.h>
  21#include <linux/jiffies.h>
  22#include <linux/kthread.h>
  23#include <linux/pci.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/interrupt.h>
  26#include <linux/slab.h>
  27
  28#include "../pci.h"
  29#include "pciehp.h"
  30
  31static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
  32	/*
  33	 * Match all Dell systems, as some Dell systems have inband
  34	 * presence disabled on NVMe slots (but don't support the bit to
  35	 * report it). Setting inband presence disabled should have no
  36	 * negative effect, except on broken hotplug slots that never
  37	 * assert presence detect--and those will still work, they will
  38	 * just have a bit of extra delay before being probed.
  39	 */
  40	{
  41		.ident = "Dell System",
  42		.matches = {
  43			DMI_MATCH(DMI_OEM_STRING, "Dell System"),
  44		},
  45	},
  46	{}
  47};
  48
  49static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
  50{
  51	return ctrl->pcie->port;
  52}
  53
  54static irqreturn_t pciehp_isr(int irq, void *dev_id);
  55static irqreturn_t pciehp_ist(int irq, void *dev_id);
  56static int pciehp_poll(void *data);
  57
  58static inline int pciehp_request_irq(struct controller *ctrl)
  59{
  60	int retval, irq = ctrl->pcie->irq;
  61
  62	if (pciehp_poll_mode) {
  63		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
  64						"pciehp_poll-%s",
  65						slot_name(ctrl));
  66		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
  67	}
  68
  69	/* Installs the interrupt handler */
  70	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
  71				      IRQF_SHARED, "pciehp", ctrl);
  72	if (retval)
  73		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
  74			 irq);
  75	return retval;
  76}
  77
  78static inline void pciehp_free_irq(struct controller *ctrl)
  79{
  80	if (pciehp_poll_mode)
  81		kthread_stop(ctrl->poll_thread);
  82	else
  83		free_irq(ctrl->pcie->irq, ctrl);
  84}
  85
  86static int pcie_poll_cmd(struct controller *ctrl, int timeout)
  87{
  88	struct pci_dev *pdev = ctrl_dev(ctrl);
  89	u16 slot_status;
  90
  91	do {
  92		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  93		if (PCI_POSSIBLE_ERROR(slot_status)) {
  94			ctrl_info(ctrl, "%s: no response from device\n",
  95				  __func__);
  96			return 0;
  97		}
  98
  99		if (slot_status & PCI_EXP_SLTSTA_CC) {
 100			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 101						   PCI_EXP_SLTSTA_CC);
 102			ctrl->cmd_busy = 0;
 103			smp_mb();
 104			return 1;
 105		}
 106		msleep(10);
 107		timeout -= 10;
 108	} while (timeout >= 0);
 109	return 0;	/* timeout */
 110}
 111
 112static void pcie_wait_cmd(struct controller *ctrl)
 113{
 114	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 115	unsigned long duration = msecs_to_jiffies(msecs);
 116	unsigned long cmd_timeout = ctrl->cmd_started + duration;
 117	unsigned long now, timeout;
 118	int rc;
 119
 120	/*
 121	 * If the controller does not generate notifications for command
 122	 * completions, we never need to wait between writes.
 123	 */
 124	if (NO_CMD_CMPL(ctrl))
 125		return;
 126
 127	if (!ctrl->cmd_busy)
 128		return;
 129
 130	/*
 131	 * Even if the command has already timed out, we want to call
 132	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
 133	 */
 134	now = jiffies;
 135	if (time_before_eq(cmd_timeout, now))
 136		timeout = 1;
 137	else
 138		timeout = cmd_timeout - now;
 139
 140	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
 141	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
 142		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
 143	else
 144		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 145
 146	if (!rc)
 147		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
 148			  ctrl->slot_ctrl,
 149			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
 150}
 151
 152#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
 153				 PCI_EXP_SLTCTL_PIC |	\
 154				 PCI_EXP_SLTCTL_AIC |	\
 155				 PCI_EXP_SLTCTL_EIC)
 156
 157static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 158			      u16 mask, bool wait)
 159{
 160	struct pci_dev *pdev = ctrl_dev(ctrl);
 161	u16 slot_ctrl_orig, slot_ctrl;
 162
 163	mutex_lock(&ctrl->ctrl_lock);
 164
 165	/*
 166	 * Always wait for any previous command that might still be in progress
 167	 */
 168	pcie_wait_cmd(ctrl);
 169
 170	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 171	if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
 172		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 173		goto out;
 174	}
 175
 176	slot_ctrl_orig = slot_ctrl;
 177	slot_ctrl &= ~mask;
 178	slot_ctrl |= (cmd & mask);
 179	ctrl->cmd_busy = 1;
 180	smp_mb();
 181	ctrl->slot_ctrl = slot_ctrl;
 182	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
 183	ctrl->cmd_started = jiffies;
 184
 185	/*
 186	 * Controllers with the Intel CF118 and similar errata advertise
 187	 * Command Completed support, but they only set Command Completed
 188	 * if we change the "Control" bits for power, power indicator,
 189	 * attention indicator, or interlock.  If we only change the
 190	 * "Enable" bits, they never set the Command Completed bit.
 191	 */
 192	if (pdev->broken_cmd_compl &&
 193	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
 194		ctrl->cmd_busy = 0;
 195
 196	/*
 197	 * Optionally wait for the hardware to be ready for a new command,
 198	 * indicating completion of the above issued command.
 199	 */
 200	if (wait)
 201		pcie_wait_cmd(ctrl);
 202
 203out:
 204	mutex_unlock(&ctrl->ctrl_lock);
 205}
 206
 207/**
 208 * pcie_write_cmd - Issue controller command
 209 * @ctrl: controller to which the command is issued
 210 * @cmd:  command value written to slot control register
 211 * @mask: bitmask of slot control register to be modified
 212 */
 213static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 214{
 215	pcie_do_write_cmd(ctrl, cmd, mask, true);
 216}
 217
 218/* Same as above without waiting for the hardware to latch */
 219static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
 220{
 221	pcie_do_write_cmd(ctrl, cmd, mask, false);
 222}
 223
 224/**
 225 * pciehp_check_link_active() - Is the link active
 226 * @ctrl: PCIe hotplug controller
 227 *
 228 * Check whether the downstream link is currently active. Note it is
 229 * possible that the card is removed immediately after this so the
 230 * caller may need to take it into account.
 231 *
 232 * If the hotplug controller itself is not available anymore returns
 233 * %-ENODEV.
 234 */
 235int pciehp_check_link_active(struct controller *ctrl)
 236{
 237	struct pci_dev *pdev = ctrl_dev(ctrl);
 238	u16 lnk_status;
 239	int ret;
 240
 241	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 242	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
 243		return -ENODEV;
 244
 245	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
 246	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 247
 248	return ret;
 249}
 250
 251static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
 252{
 253	u32 l;
 254	int count = 0;
 255	int delay = 1000, step = 20;
 256	bool found = false;
 257
 258	do {
 259		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
 260		count++;
 261
 262		if (found)
 263			break;
 264
 265		msleep(step);
 266		delay -= step;
 267	} while (delay > 0);
 268
 269	if (count > 1)
 270		pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
 271			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
 272			PCI_FUNC(devfn), count, step, l);
 273
 274	return found;
 275}
 276
 277static void pcie_wait_for_presence(struct pci_dev *pdev)
 278{
 279	int timeout = 1250;
 280	u16 slot_status;
 281
 282	do {
 283		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 284		if (slot_status & PCI_EXP_SLTSTA_PDS)
 285			return;
 286		msleep(10);
 287		timeout -= 10;
 288	} while (timeout > 0);
 289}
 290
 291int pciehp_check_link_status(struct controller *ctrl)
 292{
 293	struct pci_dev *pdev = ctrl_dev(ctrl);
 294	bool found;
 295	u16 lnk_status;
 296
 297	if (!pcie_wait_for_link(pdev, true)) {
 298		ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
 299		return -1;
 300	}
 301
 302	if (ctrl->inband_presence_disabled)
 303		pcie_wait_for_presence(pdev);
 304
 305	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
 306					PCI_DEVFN(0, 0));
 307
 308	/* ignore link or presence changes up to this point */
 309	if (found)
 310		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
 311			   &ctrl->pending_events);
 312
 313	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 314	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 315	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
 316	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
 317		ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
 318			  slot_name(ctrl), lnk_status);
 319		return -1;
 320	}
 321
 322	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
 323
 324	if (!found) {
 325		ctrl_info(ctrl, "Slot(%s): No device found\n",
 326			  slot_name(ctrl));
 327		return -1;
 328	}
 329
 330	return 0;
 331}
 332
 333static int __pciehp_link_set(struct controller *ctrl, bool enable)
 334{
 335	struct pci_dev *pdev = ctrl_dev(ctrl);
 336
 337	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 338					   PCI_EXP_LNKCTL_LD,
 339					   enable ? 0 : PCI_EXP_LNKCTL_LD);
 340
 341	return 0;
 342}
 343
 344static int pciehp_link_enable(struct controller *ctrl)
 345{
 346	return __pciehp_link_set(ctrl, true);
 347}
 348
 349int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 350				    u8 *status)
 351{
 352	struct controller *ctrl = to_ctrl(hotplug_slot);
 353	struct pci_dev *pdev = ctrl_dev(ctrl);
 354	u16 slot_ctrl;
 355
 356	pci_config_pm_runtime_get(pdev);
 357	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 358	pci_config_pm_runtime_put(pdev);
 359	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
 360	return 0;
 361}
 362
 363int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
 364{
 365	struct controller *ctrl = to_ctrl(hotplug_slot);
 366	struct pci_dev *pdev = ctrl_dev(ctrl);
 367	u16 slot_ctrl;
 368
 369	pci_config_pm_runtime_get(pdev);
 370	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 371	pci_config_pm_runtime_put(pdev);
 372	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
 373		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 374
 375	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
 376	case PCI_EXP_SLTCTL_ATTN_IND_ON:
 377		*status = 1;	/* On */
 378		break;
 379	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
 380		*status = 2;	/* Blink */
 381		break;
 382	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
 383		*status = 0;	/* Off */
 384		break;
 385	default:
 386		*status = 0xFF;
 387		break;
 388	}
 389
 390	return 0;
 391}
 392
 393void pciehp_get_power_status(struct controller *ctrl, u8 *status)
 394{
 395	struct pci_dev *pdev = ctrl_dev(ctrl);
 396	u16 slot_ctrl;
 397
 398	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 399	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
 400		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 401
 402	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
 403	case PCI_EXP_SLTCTL_PWR_ON:
 404		*status = 1;	/* On */
 405		break;
 406	case PCI_EXP_SLTCTL_PWR_OFF:
 407		*status = 0;	/* Off */
 408		break;
 409	default:
 410		*status = 0xFF;
 411		break;
 412	}
 413}
 414
 415void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
 416{
 417	struct pci_dev *pdev = ctrl_dev(ctrl);
 418	u16 slot_status;
 419
 420	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 421	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 422}
 423
 424/**
 425 * pciehp_card_present() - Is the card present
 426 * @ctrl: PCIe hotplug controller
 427 *
 428 * Function checks whether the card is currently present in the slot and
 429 * in that case returns true. Note it is possible that the card is
 430 * removed immediately after the check so the caller may need to take
 431 * this into account.
 432 *
 433 * It the hotplug controller itself is not available anymore returns
 434 * %-ENODEV.
 435 */
 436int pciehp_card_present(struct controller *ctrl)
 437{
 438	struct pci_dev *pdev = ctrl_dev(ctrl);
 439	u16 slot_status;
 440	int ret;
 441
 442	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 443	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
 444		return -ENODEV;
 445
 446	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
 447}
 448
 449/**
 450 * pciehp_card_present_or_link_active() - whether given slot is occupied
 451 * @ctrl: PCIe hotplug controller
 452 *
 453 * Unlike pciehp_card_present(), which determines presence solely from the
 454 * Presence Detect State bit, this helper also returns true if the Link Active
 455 * bit is set.  This is a concession to broken hotplug ports which hardwire
 456 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
 457 *
 458 * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
 459 *	    port is not present anymore returns %-ENODEV.
 460 */
 461int pciehp_card_present_or_link_active(struct controller *ctrl)
 462{
 463	int ret;
 464
 465	ret = pciehp_card_present(ctrl);
 466	if (ret)
 467		return ret;
 468
 469	return pciehp_check_link_active(ctrl);
 470}
 471
 472int pciehp_query_power_fault(struct controller *ctrl)
 473{
 474	struct pci_dev *pdev = ctrl_dev(ctrl);
 475	u16 slot_status;
 476
 477	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 478	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
 479}
 480
 481int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 482				    u8 status)
 483{
 484	struct controller *ctrl = to_ctrl(hotplug_slot);
 485	struct pci_dev *pdev = ctrl_dev(ctrl);
 486
 487	pci_config_pm_runtime_get(pdev);
 488	pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC, status),
 
 
 489			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
 490	pci_config_pm_runtime_put(pdev);
 491	return 0;
 492}
 493
 494/**
 495 * pciehp_set_indicators() - set attention indicator, power indicator, or both
 496 * @ctrl: PCIe hotplug controller
 497 * @pwr: one of:
 498 *	PCI_EXP_SLTCTL_PWR_IND_ON
 499 *	PCI_EXP_SLTCTL_PWR_IND_BLINK
 500 *	PCI_EXP_SLTCTL_PWR_IND_OFF
 501 * @attn: one of:
 502 *	PCI_EXP_SLTCTL_ATTN_IND_ON
 503 *	PCI_EXP_SLTCTL_ATTN_IND_BLINK
 504 *	PCI_EXP_SLTCTL_ATTN_IND_OFF
 505 *
 506 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
 507 * unchanged.
 508 */
 509void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
 510{
 511	u16 cmd = 0, mask = 0;
 512
 513	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
 514		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
 515		mask |= PCI_EXP_SLTCTL_PIC;
 516	}
 517
 518	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
 519		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
 520		mask |= PCI_EXP_SLTCTL_AIC;
 521	}
 522
 523	if (cmd) {
 524		pcie_write_cmd_nowait(ctrl, cmd, mask);
 525		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 526			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 527	}
 528}
 529
 530int pciehp_power_on_slot(struct controller *ctrl)
 531{
 532	struct pci_dev *pdev = ctrl_dev(ctrl);
 533	u16 slot_status;
 534	int retval;
 535
 536	/* Clear power-fault bit from previous power failures */
 537	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 538	if (slot_status & PCI_EXP_SLTSTA_PFD)
 539		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 540					   PCI_EXP_SLTSTA_PFD);
 541	ctrl->power_fault_detected = 0;
 542
 543	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
 544	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 545		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 546		 PCI_EXP_SLTCTL_PWR_ON);
 547
 548	retval = pciehp_link_enable(ctrl);
 549	if (retval)
 550		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
 551
 552	return retval;
 553}
 554
 555void pciehp_power_off_slot(struct controller *ctrl)
 556{
 557	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
 558	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 559		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 560		 PCI_EXP_SLTCTL_PWR_OFF);
 561}
 562
 563static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
 564					  struct pci_dev *pdev, int irq)
 565{
 566	/*
 567	 * Ignore link changes which occurred while waiting for DPC recovery.
 568	 * Could be several if DPC triggered multiple times consecutively.
 569	 */
 570	synchronize_hardirq(irq);
 571	atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
 572	if (pciehp_poll_mode)
 573		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 574					   PCI_EXP_SLTSTA_DLLSC);
 575	ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
 576		  slot_name(ctrl));
 577
 578	/*
 579	 * If the link is unexpectedly down after successful recovery,
 580	 * the corresponding link change may have been ignored above.
 581	 * Synthesize it to ensure that it is acted on.
 582	 */
 583	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 584	if (!pciehp_check_link_active(ctrl))
 585		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 586	up_read(&ctrl->reset_lock);
 587}
 588
 589static irqreturn_t pciehp_isr(int irq, void *dev_id)
 590{
 591	struct controller *ctrl = (struct controller *)dev_id;
 592	struct pci_dev *pdev = ctrl_dev(ctrl);
 593	struct device *parent = pdev->dev.parent;
 594	u16 status, events = 0;
 595
 596	/*
 597	 * Interrupts only occur in D3hot or shallower and only if enabled
 598	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
 599	 */
 600	if (pdev->current_state == PCI_D3cold ||
 601	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
 602		return IRQ_NONE;
 603
 604	/*
 605	 * Keep the port accessible by holding a runtime PM ref on its parent.
 606	 * Defer resume of the parent to the IRQ thread if it's suspended.
 607	 * Mask the interrupt until then.
 608	 */
 609	if (parent) {
 610		pm_runtime_get_noresume(parent);
 611		if (!pm_runtime_active(parent)) {
 612			pm_runtime_put(parent);
 613			disable_irq_nosync(irq);
 614			atomic_or(RERUN_ISR, &ctrl->pending_events);
 615			return IRQ_WAKE_THREAD;
 616		}
 617	}
 618
 619read_status:
 620	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
 621	if (PCI_POSSIBLE_ERROR(status)) {
 622		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 623		if (parent)
 624			pm_runtime_put(parent);
 625		return IRQ_NONE;
 626	}
 627
 628	/*
 629	 * Slot Status contains plain status bits as well as event
 630	 * notification bits; right now we only want the event bits.
 631	 */
 632	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 633		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
 634		  PCI_EXP_SLTSTA_DLLSC;
 635
 636	/*
 637	 * If we've already reported a power fault, don't report it again
 638	 * until we've done something to handle it.
 639	 */
 640	if (ctrl->power_fault_detected)
 641		status &= ~PCI_EXP_SLTSTA_PFD;
 642	else if (status & PCI_EXP_SLTSTA_PFD)
 643		ctrl->power_fault_detected = true;
 644
 645	events |= status;
 646	if (!events) {
 647		if (parent)
 648			pm_runtime_put(parent);
 649		return IRQ_NONE;
 650	}
 651
 652	if (status) {
 653		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
 654
 655		/*
 656		 * In MSI mode, all event bits must be zero before the port
 657		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
 658		 * So re-read the Slot Status register in case a bit was set
 659		 * between read and write.
 660		 */
 661		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
 662			goto read_status;
 663	}
 664
 665	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
 666	if (parent)
 667		pm_runtime_put(parent);
 668
 669	/*
 670	 * Command Completed notifications are not deferred to the
 671	 * IRQ thread because it may be waiting for their arrival.
 672	 */
 673	if (events & PCI_EXP_SLTSTA_CC) {
 674		ctrl->cmd_busy = 0;
 675		smp_mb();
 676		wake_up(&ctrl->queue);
 677
 678		if (events == PCI_EXP_SLTSTA_CC)
 679			return IRQ_HANDLED;
 680
 681		events &= ~PCI_EXP_SLTSTA_CC;
 682	}
 683
 684	if (pdev->ignore_hotplug) {
 685		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
 686		return IRQ_HANDLED;
 687	}
 688
 689	/* Save pending events for consumption by IRQ thread. */
 690	atomic_or(events, &ctrl->pending_events);
 691	return IRQ_WAKE_THREAD;
 692}
 693
 694static irqreturn_t pciehp_ist(int irq, void *dev_id)
 695{
 696	struct controller *ctrl = (struct controller *)dev_id;
 697	struct pci_dev *pdev = ctrl_dev(ctrl);
 698	irqreturn_t ret;
 699	u32 events;
 700
 701	ctrl->ist_running = true;
 702	pci_config_pm_runtime_get(pdev);
 703
 704	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
 705	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
 706		ret = pciehp_isr(irq, dev_id);
 707		enable_irq(irq);
 708		if (ret != IRQ_WAKE_THREAD)
 709			goto out;
 710	}
 711
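	/*
	 * Wait for any in-flight pciehp_isr() to finish updating
	 * ctrl->pending_events before the events are consumed below.
	 */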
 712	synchronize_hardirq(irq);
 713	events = atomic_xchg(&ctrl->pending_events, 0);
 714	if (!events) {
 715		ret = IRQ_NONE;
 716		goto out;
 717	}
 718
 719	/* Check Attention Button Pressed */
 720	if (events & PCI_EXP_SLTSTA_ABP)
 721		pciehp_handle_button_press(ctrl);
 722
 723	/* Check Power Fault Detected */
 724	if (events & PCI_EXP_SLTSTA_PFD) {
 725		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
 726		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
 727				      PCI_EXP_SLTCTL_ATTN_IND_ON);
 728	}
 729
 730	/*
 731	 * Ignore Link Down/Up events caused by Downstream Port Containment
 732	 * if recovery from the error succeeded.
 733	 */
 734	if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
 735	    ctrl->state == ON_STATE) {
 736		events &= ~PCI_EXP_SLTSTA_DLLSC;
 737		pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
 738	}
 739
 740	/*
 741	 * Disable requests have higher priority than Presence Detect Changed
 742	 * or Data Link Layer State Changed events.
 743	 */
 744	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 745	if (events & DISABLE_SLOT)
 746		pciehp_handle_disable_request(ctrl);
 747	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
 748		pciehp_handle_presence_or_link_change(ctrl, events);
 749	up_read(&ctrl->reset_lock);
 750
 751	ret = IRQ_HANDLED;
 752out:
 753	pci_config_pm_runtime_put(pdev);
 754	ctrl->ist_running = false;
 755	wake_up(&ctrl->requester);
 756	return ret;
 757}
 758
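/*
 * Polling replacement for the interrupt handlers: when polling mode is
 * enabled (pciehp_poll_mode), this kthread periodically calls pciehp_isr()
 * and pciehp_ist() instead of relying on hot-plug interrupts.
 */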
 759static int pciehp_poll(void *data)
 760{
 761	struct controller *ctrl = data;
 762
 763	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
 764
 765	while (!kthread_should_stop()) {
 766		/* poll for interrupt events or user requests */
 767		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
 768		       atomic_read(&ctrl->pending_events))
 769			pciehp_ist(IRQ_NOTCONNECTED, ctrl);
 770
 771		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
 772			pciehp_poll_time = 2; /* clamp to sane value */
 773
 774		schedule_timeout_idle(pciehp_poll_time * HZ);
 775	}
 776
 777	return 0;
 778}
 779
 780static void pcie_enable_notification(struct controller *ctrl)
 781{
 782	u16 cmd, mask;
 783
 784	/*
 785	 * TBD: Power fault detected software notification support.
 786	 *
 787	 * Power fault detected software notification is not enabled
 788	 * because it caused a power fault interrupt storm on some
 789	 * machines: the Power Fault Detected bit in the Slot Status
 790	 * register was set again immediately after being cleared in
 791	 * the interrupt service routine, so the next power fault
 792	 * interrupt fired right away.
 793	 */
 794
 795	/*
 796	 * Always enable link events: thus link-up and link-down shall
 797	 * always be treated as hotplug and unplug respectively. Enable
 798	 * presence detect only if Attention Button is not present.
 799	 */
 800	cmd = PCI_EXP_SLTCTL_DLLSCE;
 801	if (ATTN_BUTTN(ctrl))
 802		cmd |= PCI_EXP_SLTCTL_ABPE;
 803	else
 804		cmd |= PCI_EXP_SLTCTL_PDCE;
 805	if (!pciehp_poll_mode)
 806		cmd |= PCI_EXP_SLTCTL_HPIE;
 807	if (!pciehp_poll_mode && !NO_CMD_CMPL(ctrl))
 808		cmd |= PCI_EXP_SLTCTL_CCIE;
 809
 810	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 811		PCI_EXP_SLTCTL_PFDE |
 812		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 813		PCI_EXP_SLTCTL_DLLSCE);
 814
 815	pcie_write_cmd_nowait(ctrl, cmd, mask);
 816	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 817		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 818}
 819
 820static void pcie_disable_notification(struct controller *ctrl)
 821{
 822	u16 mask;
 823
 824	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 825		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
 826		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 827		PCI_EXP_SLTCTL_DLLSCE);
 828	pcie_write_cmd(ctrl, 0, mask);
 829	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 830		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 831}
 832
 833void pcie_clear_hotplug_events(struct controller *ctrl)
 834{
 835	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
 836				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
 837}
 838
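/*
 * Unmask the Hot-Plug Interrupt Enable and Data Link Layer State Changed
 * Enable bits; the counterpart to pcie_disable_interrupt() below (used, for
 * example, around system sleep transitions).
 */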
 839void pcie_enable_interrupt(struct controller *ctrl)
 840{
 841	u16 mask;
 842
 843	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 844	pcie_write_cmd(ctrl, mask, mask);
 845}
 846
 847void pcie_disable_interrupt(struct controller *ctrl)
 848{
 849	u16 mask;
 850
 851	/*
 852	 * Mask the hot-plug interrupt to prevent it from triggering
 853	 * immediately when the link goes inactive (we still get a PME
 854	 * when any of the enabled events is detected). The same applies
 855	 * to the Data Link Layer State Changed event, which generates a
 856	 * PME as soon as the link goes inactive, so mask it as well.
 857	 */
 858	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 859	pcie_write_cmd(ctrl, 0, mask);
 860}
 861
 862/**
 863 * pciehp_slot_reset() - ignore link event caused by error-induced hot reset
 864 * @dev: PCI Express port service device
 865 *
 866 * Called from pcie_portdrv_slot_reset() after AER or DPC initiated a reset
 867 * further up in the hierarchy to recover from an error.  The reset was
 868 * propagated down to this hotplug port.  Ignore the resulting link flap.
 869 * If the link failed to retrain successfully, synthesize the ignored event.
 870 * Surprise removal during reset is detected through Presence Detect Changed.
 871 */
 872int pciehp_slot_reset(struct pcie_device *dev)
 873{
 874	struct controller *ctrl = get_service_data(dev);
 875
 876	if (ctrl->state != ON_STATE)
 877		return 0;
 878
 879	pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA,
 880				   PCI_EXP_SLTSTA_DLLSC);
 881
 882	if (!pciehp_check_link_active(ctrl))
 883		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 884
 885	return 0;
 886}
 887
 888	/*
 889	 * pciehp has a 1:1 bus:slot relationship, so what we ultimately want is a
 890	 * secondary bus reset of the bridge; at the same time, the reset must not
 891	 * be seen as a hot-unplug followed by a hot-plug of the device. Thus,
 892	 * momentarily disable Presence Detect Changed and Data Link Layer State
 893	 * Changed notifications where they could interfere, and clear any events
 894	 * they may have spuriously latched afterwards.
 895	 */
 896int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
 897{
 898	struct controller *ctrl = to_ctrl(hotplug_slot);
 899	struct pci_dev *pdev = ctrl_dev(ctrl);
 900	u16 stat_mask = 0, ctrl_mask = 0;
 901	int rc;
 902
 903	if (probe)
 904		return 0;
 905
 906	down_write_nested(&ctrl->reset_lock, ctrl->depth);
 907
 908	if (!ATTN_BUTTN(ctrl)) {
 909		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
 910		stat_mask |= PCI_EXP_SLTSTA_PDC;
 911	}
 912	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
 913	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
 914
 915	pcie_write_cmd(ctrl, 0, ctrl_mask);
 916	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 917		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 918
 919	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
 920
 921	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
 922	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
 923	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 924		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
 925
 926	up_write(&ctrl->reset_lock);
 927	return rc;
 928}
 929
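/*
 * Request the hot-plug IRQ (or start the poll thread in polling mode) and
 * then enable event notifications in the Slot Control register.
 */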
 930int pcie_init_notification(struct controller *ctrl)
 931{
 932	if (pciehp_request_irq(ctrl))
 933		return -1;
 934	pcie_enable_notification(ctrl);
 935	ctrl->notification_enabled = 1;
 936	return 0;
 937}
 938
 939void pcie_shutdown_notification(struct controller *ctrl)
 940{
 941	if (ctrl->notification_enabled) {
 942		pcie_disable_notification(ctrl);
 943		pciehp_free_irq(ctrl);
 944		ctrl->notification_enabled = 0;
 945	}
 946}
 947
 948static inline void dbg_ctrl(struct controller *ctrl)
 949{
 950	struct pci_dev *pdev = ctrl->pcie->port;
 951	u16 reg16;
 952
 953	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
 954	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
 955	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
 956	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
 957	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
 958}
 959
 960#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
 961
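/*
 * Count the hotplug bridges above this port.  The result becomes ctrl->depth,
 * which is passed as the lockdep nesting level to down_read_nested() /
 * down_write_nested() on ctrl->reset_lock so that nested hotplug ports don't
 * produce false lock-ordering warnings.
 */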
 962static inline int pcie_hotplug_depth(struct pci_dev *dev)
 963{
 964	struct pci_bus *bus = dev->bus;
 965	int depth = 0;
 966
 967	while (bus->parent) {
 968		bus = bus->parent;
 969		if (bus->self && bus->self->is_hotplug_bridge)
 970			depth++;
 971	}
 972
 973	return depth;
 974}
 975
 976struct controller *pcie_init(struct pcie_device *dev)
 977{
 978	struct controller *ctrl;
 979	u32 slot_cap, slot_cap2;
 980	u8 poweron;
 981	struct pci_dev *pdev = dev->port;
 982	struct pci_bus *subordinate = pdev->subordinate;
 983
 984	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 985	if (!ctrl)
 986		return NULL;
 987
 988	ctrl->pcie = dev;
 989	ctrl->depth = pcie_hotplug_depth(dev->port);
 990	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
 991
 992	if (pdev->hotplug_user_indicators)
 993		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
 994
 995	/*
 996	 * We assume no Thunderbolt controllers support Command Complete events,
 997	 * but some controllers falsely claim they do.
 998	 */
 999	if (pdev->is_thunderbolt)
1000		slot_cap |= PCI_EXP_SLTCAP_NCCS;
1001
1002	ctrl->slot_cap = slot_cap;
1003	mutex_init(&ctrl->ctrl_lock);
1004	mutex_init(&ctrl->state_lock);
1005	init_rwsem(&ctrl->reset_lock);
1006	init_waitqueue_head(&ctrl->requester);
1007	init_waitqueue_head(&ctrl->queue);
1008	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
1009	dbg_ctrl(ctrl);
1010
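	/*
	 * Derive the initial slot state from whether any devices are already
	 * enumerated on the secondary bus.
	 */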
1011	down_read(&pci_bus_sem);
1012	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
1013	up_read(&pci_bus_sem);
1014
1015	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
1016	if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
1017		pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
1018				      PCI_EXP_SLTCTL_IBPD_DISABLE);
1019		ctrl->inband_presence_disabled = 1;
1020	}
1021
1022	if (dmi_first_match(inband_presence_disabled_dmi_table))
1023		ctrl->inband_presence_disabled = 1;
1024
1025	/* Clear all remaining event bits in the Slot Status register. */
1026	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
1027		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
1028		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
1029		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
1030
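	/*
	 * Summarize the slot capabilities; FLAG() prints '+' if the
	 * capability bit is set and '-' otherwise.
	 */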
1031	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
1032		FIELD_GET(PCI_EXP_SLTCAP_PSN, slot_cap),
1033		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
1034		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
1035		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
1036		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
1037		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
1038		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
1039		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
1040		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
1041		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
1042		FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
1043		FLAG(pdev->link_active_reporting, true),
1044		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
1045
1046	/*
1047	 * If the slot is empty but its power status is on, turn power off.  The IRQ
1048	 * isn't requested yet, so avoid triggering a notification with this command.
1049	 */
1050	if (POWER_CTRL(ctrl)) {
1051		pciehp_get_power_status(ctrl, &poweron);
1052		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
1053			pcie_disable_notification(ctrl);
1054			pciehp_power_off_slot(ctrl);
1055		}
1056	}
1057
1058	return ctrl;
1059}
1060
1061void pciehp_release_ctrl(struct controller *ctrl)
1062{
1063	cancel_delayed_work_sync(&ctrl->button_work);
1064	kfree(ctrl);
1065}
1066
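/*
 * Some hotplug controllers advertise Command Completed support (NCCS clear in
 * Slot Capabilities) but, due to errata, only set Command Completed when a
 * Slot Control write changes the power/indicator control bits, never when it
 * only toggles the event-notification enables.  Mark such devices so pciehp
 * can avoid waiting for a completion that will never arrive.
 */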
1067static void quirk_cmd_compl(struct pci_dev *pdev)
1068{
1069	u32 slot_cap;
1070
1071	if (pci_is_pcie(pdev)) {
1072		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
1073		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
1074		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
1075			pdev->broken_cmd_compl = 1;
1076	}
1077}
1078DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1079			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1080DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
1081			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1082DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
1083			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1084DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
1085			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1086DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
1087			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1088DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
1089			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);