v5.4 (drivers/pci/hotplug/pciehp_hpc.c)
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * PCI Express PCI Hot Plug Driver
  4 *
  5 * Copyright (C) 1995,2001 Compaq Computer Corporation
  6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
  7 * Copyright (C) 2001 IBM Corp.
  8 * Copyright (C) 2003-2004 Intel Corporation
  9 *
 10 * All rights reserved.
 11 *
 12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
 13 */
 14
 15#define dev_fmt(fmt) "pciehp: " fmt
 16
 17#include <linux/kernel.h>
 18#include <linux/types.h>
 19#include <linux/jiffies.h>
 20#include <linux/kthread.h>
 21#include <linux/pci.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/interrupt.h>
 24#include <linux/slab.h>
 25
 26#include "../pci.h"
 27#include "pciehp.h"
 28
 29static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
 30{
 31	return ctrl->pcie->port;
 32}
 33
 34static irqreturn_t pciehp_isr(int irq, void *dev_id);
 35static irqreturn_t pciehp_ist(int irq, void *dev_id);
 36static int pciehp_poll(void *data);
 37
 38static inline int pciehp_request_irq(struct controller *ctrl)
 39{
 40	int retval, irq = ctrl->pcie->irq;
 41
 42	if (pciehp_poll_mode) {
 43		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
 44						"pciehp_poll-%s",
 45						slot_name(ctrl));
 46		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
 47	}
 48
 49	/* Installs the interrupt handler */
 50	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
 51				      IRQF_SHARED, "pciehp", ctrl);
 52	if (retval)
 53		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
 54			 irq);
 55	return retval;
 56}
 57
 58static inline void pciehp_free_irq(struct controller *ctrl)
 59{
 60	if (pciehp_poll_mode)
 61		kthread_stop(ctrl->poll_thread);
 62	else
 63		free_irq(ctrl->pcie->irq, ctrl);
 64}
 65
 66static int pcie_poll_cmd(struct controller *ctrl, int timeout)
 67{
 68	struct pci_dev *pdev = ctrl_dev(ctrl);
 69	u16 slot_status;
 70
 71	while (true) {
 72		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 73		if (slot_status == (u16) ~0) {
 74			ctrl_info(ctrl, "%s: no response from device\n",
 75				  __func__);
 76			return 0;
 77		}
 78
 79		if (slot_status & PCI_EXP_SLTSTA_CC) {
 80			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 81						   PCI_EXP_SLTSTA_CC);
 82			return 1;
 83		}
 84		if (timeout < 0)
 85			break;
 86		msleep(10);
 87		timeout -= 10;
 88	}
 89	return 0;	/* timeout */
 90}
 91
 92static void pcie_wait_cmd(struct controller *ctrl)
 93{
 94	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 95	unsigned long duration = msecs_to_jiffies(msecs);
 96	unsigned long cmd_timeout = ctrl->cmd_started + duration;
 97	unsigned long now, timeout;
 98	int rc;
 99
100	/*
101	 * If the controller does not generate notifications for command
102	 * completions, we never need to wait between writes.
103	 */
104	if (NO_CMD_CMPL(ctrl))
105		return;
106
107	if (!ctrl->cmd_busy)
108		return;
109
110	/*
111	 * Even if the command has already timed out, we want to call
112	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
113	 */
114	now = jiffies;
115	if (time_before_eq(cmd_timeout, now))
116		timeout = 1;
117	else
118		timeout = cmd_timeout - now;
119
120	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
121	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
122		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
123	else
124		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
125
126	if (!rc)
127		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
128			  ctrl->slot_ctrl,
129			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
130}
131
132#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
133				 PCI_EXP_SLTCTL_PIC |	\
134				 PCI_EXP_SLTCTL_AIC |	\
135				 PCI_EXP_SLTCTL_EIC)
136
137static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
138			      u16 mask, bool wait)
139{
140	struct pci_dev *pdev = ctrl_dev(ctrl);
141	u16 slot_ctrl_orig, slot_ctrl;
142
143	mutex_lock(&ctrl->ctrl_lock);
144
145	/*
146	 * Always wait for any previous command that might still be in progress
147	 */
148	pcie_wait_cmd(ctrl);
149
150	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
151	if (slot_ctrl == (u16) ~0) {
152		ctrl_info(ctrl, "%s: no response from device\n", __func__);
153		goto out;
154	}
155
156	slot_ctrl_orig = slot_ctrl;
157	slot_ctrl &= ~mask;
158	slot_ctrl |= (cmd & mask);
159	ctrl->cmd_busy = 1;
160	smp_mb();
161	ctrl->slot_ctrl = slot_ctrl;
162	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
163	ctrl->cmd_started = jiffies;
164
165	/*
166	 * Controllers with the Intel CF118 and similar errata advertise
167	 * Command Completed support, but they only set Command Completed
168	 * if we change the "Control" bits for power, power indicator,
169	 * attention indicator, or interlock.  If we only change the
170	 * "Enable" bits, they never set the Command Completed bit.
171	 */
172	if (pdev->broken_cmd_compl &&
173	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
174		ctrl->cmd_busy = 0;
175
176	/*
177	 * Optionally wait for the hardware to be ready for a new command,
178	 * indicating completion of the above issued command.
179	 */
180	if (wait)
181		pcie_wait_cmd(ctrl);
182
183out:
184	mutex_unlock(&ctrl->ctrl_lock);
185}
186
187/**
188 * pcie_write_cmd - Issue controller command
189 * @ctrl: controller to which the command is issued
190 * @cmd:  command value written to slot control register
191 * @mask: bitmask of slot control register to be modified
192 */
193static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
194{
195	pcie_do_write_cmd(ctrl, cmd, mask, true);
196}
197
198/* Same as above without waiting for the hardware to latch */
199static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
200{
201	pcie_do_write_cmd(ctrl, cmd, mask, false);
202}
203
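/*
 * Illustrative sketch (added by the editor, not part of pciehp_hpc.c):
 * the cmd/mask pair lets a caller update a single Slot Control field
 * while pcie_do_write_cmd()'s read-modify-write preserves every other
 * bit.  Powering the slot on, for example, touches only the Power
 * Controller Control field; the helper name below is hypothetical.
 */
static void example_slot_power_on(struct controller *ctrl)
{
	/* Write PWR_ON into the PCC field; interrupt enables and the
	 * indicator fields in Slot Control are left untouched. */
	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
}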
204bool pciehp_check_link_active(struct controller *ctrl)
205{
206	struct pci_dev *pdev = ctrl_dev(ctrl);
207	u16 lnk_status;
208	bool ret;
209
210	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
211	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
212
213	if (ret)
214		ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
215
216	return ret;
217}
218
219static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
220{
221	u32 l;
222	int count = 0;
223	int delay = 1000, step = 20;
224	bool found = false;
225
226	do {
227		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
228		count++;
229
230		if (found)
231			break;
232
233		msleep(step);
234		delay -= step;
235	} while (delay > 0);
236
237	if (count > 1)
238		pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
239			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
240			PCI_FUNC(devfn), count, step, l);
241
242	return found;
243}
244
245int pciehp_check_link_status(struct controller *ctrl)
246{
247	struct pci_dev *pdev = ctrl_dev(ctrl);
248	bool found;
249	u16 lnk_status;
250
251	if (!pcie_wait_for_link(pdev, true))
252		return -1;
253
254	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
255					PCI_DEVFN(0, 0));
256
257	/* ignore link or presence changes up to this point */
258	if (found)
259		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
260			   &ctrl->pending_events);
261
262	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
263	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
264	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
265	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
266		ctrl_err(ctrl, "link training error: status %#06x\n",
267			 lnk_status);
268		return -1;
269	}
270
271	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
272
273	if (!found)
274		return -1;
275
276	return 0;
277}
278
279static int __pciehp_link_set(struct controller *ctrl, bool enable)
280{
281	struct pci_dev *pdev = ctrl_dev(ctrl);
282	u16 lnk_ctrl;
283
284	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
285
286	if (enable)
287		lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
288	else
289		lnk_ctrl |= PCI_EXP_LNKCTL_LD;
290
291	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
292	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
293	return 0;
294}
295
296static int pciehp_link_enable(struct controller *ctrl)
297{
298	return __pciehp_link_set(ctrl, true);
299}
300
301int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
302				    u8 *status)
303{
304	struct controller *ctrl = to_ctrl(hotplug_slot);
305	struct pci_dev *pdev = ctrl_dev(ctrl);
306	u16 slot_ctrl;
307
308	pci_config_pm_runtime_get(pdev);
309	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
310	pci_config_pm_runtime_put(pdev);
311	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
312	return 0;
313}
314
315int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
316{
317	struct controller *ctrl = to_ctrl(hotplug_slot);
318	struct pci_dev *pdev = ctrl_dev(ctrl);
319	u16 slot_ctrl;
320
321	pci_config_pm_runtime_get(pdev);
322	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
323	pci_config_pm_runtime_put(pdev);
324	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
325		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
326
327	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
328	case PCI_EXP_SLTCTL_ATTN_IND_ON:
329		*status = 1;	/* On */
330		break;
331	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
332		*status = 2;	/* Blink */
333		break;
334	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
335		*status = 0;	/* Off */
336		break;
337	default:
338		*status = 0xFF;
339		break;
340	}
341
342	return 0;
343}
344
345void pciehp_get_power_status(struct controller *ctrl, u8 *status)
346{
347	struct pci_dev *pdev = ctrl_dev(ctrl);
348	u16 slot_ctrl;
349
350	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
351	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
352		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
353
354	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
355	case PCI_EXP_SLTCTL_PWR_ON:
356		*status = 1;	/* On */
357		break;
358	case PCI_EXP_SLTCTL_PWR_OFF:
359		*status = 0;	/* Off */
360		break;
361	default:
362		*status = 0xFF;
363		break;
364	}
365}
366
367void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
368{
369	struct pci_dev *pdev = ctrl_dev(ctrl);
370	u16 slot_status;
371
372	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
373	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
374}
375
376bool pciehp_card_present(struct controller *ctrl)
377{
378	struct pci_dev *pdev = ctrl_dev(ctrl);
379	u16 slot_status;
380
381	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
382	return slot_status & PCI_EXP_SLTSTA_PDS;
383}
384
385/**
386 * pciehp_card_present_or_link_active() - whether given slot is occupied
387 * @ctrl: PCIe hotplug controller
388 *
389 * Unlike pciehp_card_present(), which determines presence solely from the
390 * Presence Detect State bit, this helper also returns true if the Link Active
391 * bit is set.  This is a concession to broken hotplug ports which hardwire
392 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
393 */
394bool pciehp_card_present_or_link_active(struct controller *ctrl)
395{
396	return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
397}
398
399int pciehp_query_power_fault(struct controller *ctrl)
400{
401	struct pci_dev *pdev = ctrl_dev(ctrl);
402	u16 slot_status;
403
404	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
405	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
406}
407
408int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
409				    u8 status)
410{
411	struct controller *ctrl = to_ctrl(hotplug_slot);
412	struct pci_dev *pdev = ctrl_dev(ctrl);
413
414	pci_config_pm_runtime_get(pdev);
415	pcie_write_cmd_nowait(ctrl, status << 6,
416			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
417	pci_config_pm_runtime_put(pdev);
418	return 0;
419}
420
421/**
422 * pciehp_set_indicators() - set attention indicator, power indicator, or both
423 * @ctrl: PCIe hotplug controller
424 * @pwr: one of:
425 *	PCI_EXP_SLTCTL_PWR_IND_ON
426 *	PCI_EXP_SLTCTL_PWR_IND_BLINK
427 *	PCI_EXP_SLTCTL_PWR_IND_OFF
428 * @attn: one of:
429 *	PCI_EXP_SLTCTL_ATTN_IND_ON
430 *	PCI_EXP_SLTCTL_ATTN_IND_BLINK
431 *	PCI_EXP_SLTCTL_ATTN_IND_OFF
432 *
433 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
434 * unchanged.
435 */
436void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
437{
438	u16 cmd = 0, mask = 0;
439
440	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
441		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
442		mask |= PCI_EXP_SLTCTL_PIC;
443	}
444
445	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
446		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
447		mask |= PCI_EXP_SLTCTL_AIC;
448	}
449
450	if (cmd) {
451		pcie_write_cmd_nowait(ctrl, cmd, mask);
452		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
453			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
454	}
455}
456
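/*
 * Illustrative sketch (added by the editor, not part of pciehp_hpc.c):
 * a caller blinks the Power Indicator while leaving the Attention
 * Indicator untouched by passing INDICATOR_NOOP for the field it does
 * not want to change; the helper name below is hypothetical.
 */
static void example_blink_power_indicator(struct controller *ctrl)
{
	/* Power Indicator -> Blink, Attention Indicator -> unchanged */
	pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
			      INDICATOR_NOOP);
}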
457int pciehp_power_on_slot(struct controller *ctrl)
458{
459	struct pci_dev *pdev = ctrl_dev(ctrl);
460	u16 slot_status;
461	int retval;
462
463	/* Clear power-fault bit from previous power failures */
464	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
465	if (slot_status & PCI_EXP_SLTSTA_PFD)
466		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
467					   PCI_EXP_SLTSTA_PFD);
468	ctrl->power_fault_detected = 0;
469
470	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
471	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
472		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
473		 PCI_EXP_SLTCTL_PWR_ON);
474
475	retval = pciehp_link_enable(ctrl);
476	if (retval)
477		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
478
479	return retval;
480}
481
482void pciehp_power_off_slot(struct controller *ctrl)
483{
484	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
485	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
486		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
487		 PCI_EXP_SLTCTL_PWR_OFF);
488}
489
490static irqreturn_t pciehp_isr(int irq, void *dev_id)
491{
492	struct controller *ctrl = (struct controller *)dev_id;
493	struct pci_dev *pdev = ctrl_dev(ctrl);
494	struct device *parent = pdev->dev.parent;
495	u16 status, events;
496
497	/*
498	 * Interrupts only occur in D3hot or shallower and only if enabled
499	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
500	 */
501	if (pdev->current_state == PCI_D3cold ||
502	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
503		return IRQ_NONE;
504
505	/*
506	 * Keep the port accessible by holding a runtime PM ref on its parent.
507	 * Defer resume of the parent to the IRQ thread if it's suspended.
508	 * Mask the interrupt until then.
509	 */
510	if (parent) {
511		pm_runtime_get_noresume(parent);
512		if (!pm_runtime_active(parent)) {
513			pm_runtime_put(parent);
514			disable_irq_nosync(irq);
515			atomic_or(RERUN_ISR, &ctrl->pending_events);
516			return IRQ_WAKE_THREAD;
517		}
518	}
519
520	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
521	if (status == (u16) ~0) {
522		ctrl_info(ctrl, "%s: no response from device\n", __func__);
523		if (parent)
524			pm_runtime_put(parent);
525		return IRQ_NONE;
526	}
527
528	/*
529	 * Slot Status contains plain status bits as well as event
530	 * notification bits; right now we only want the event bits.
531	 */
532	events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
533			   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
534			   PCI_EXP_SLTSTA_DLLSC);
535
536	/*
537	 * If we've already reported a power fault, don't report it again
538	 * until we've done something to handle it.
539	 */
540	if (ctrl->power_fault_detected)
541		events &= ~PCI_EXP_SLTSTA_PFD;
542
543	if (!events) {
544		if (parent)
545			pm_runtime_put(parent);
546		return IRQ_NONE;
547	}
548
549	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
550	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
551	if (parent)
552		pm_runtime_put(parent);
553
554	/*
555	 * Command Completed notifications are not deferred to the
556	 * IRQ thread because it may be waiting for their arrival.
557	 */
558	if (events & PCI_EXP_SLTSTA_CC) {
559		ctrl->cmd_busy = 0;
560		smp_mb();
561		wake_up(&ctrl->queue);
562
563		if (events == PCI_EXP_SLTSTA_CC)
564			return IRQ_HANDLED;
565
566		events &= ~PCI_EXP_SLTSTA_CC;
567	}
568
569	if (pdev->ignore_hotplug) {
570		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
571		return IRQ_HANDLED;
572	}
573
574	/* Save pending events for consumption by IRQ thread. */
575	atomic_or(events, &ctrl->pending_events);
576	return IRQ_WAKE_THREAD;
577}
578
579static irqreturn_t pciehp_ist(int irq, void *dev_id)
580{
581	struct controller *ctrl = (struct controller *)dev_id;
582	struct pci_dev *pdev = ctrl_dev(ctrl);
583	irqreturn_t ret;
584	u32 events;
585
586	pci_config_pm_runtime_get(pdev);
587
588	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
589	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
590		ret = pciehp_isr(irq, dev_id);
591		enable_irq(irq);
592		if (ret != IRQ_WAKE_THREAD) {
593			pci_config_pm_runtime_put(pdev);
594			return ret;
595		}
596	}
597
598	synchronize_hardirq(irq);
599	events = atomic_xchg(&ctrl->pending_events, 0);
600	if (!events) {
601		pci_config_pm_runtime_put(pdev);
602		return IRQ_NONE;
603	}
604
605	/* Check Attention Button Pressed */
606	if (events & PCI_EXP_SLTSTA_ABP) {
607		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
608			  slot_name(ctrl));
609		pciehp_handle_button_press(ctrl);
610	}
611
612	/* Check Power Fault Detected */
613	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
614		ctrl->power_fault_detected = 1;
615		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
616		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
617				      PCI_EXP_SLTCTL_ATTN_IND_ON);
618	}
619
620	/*
621	 * Disable requests have higher priority than Presence Detect Changed
622	 * or Data Link Layer State Changed events.
623	 */
624	down_read(&ctrl->reset_lock);
625	if (events & DISABLE_SLOT)
626		pciehp_handle_disable_request(ctrl);
627	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
628		pciehp_handle_presence_or_link_change(ctrl, events);
629	up_read(&ctrl->reset_lock);
630
631	pci_config_pm_runtime_put(pdev);
632	wake_up(&ctrl->requester);
633	return IRQ_HANDLED;
634}
635
636static int pciehp_poll(void *data)
637{
638	struct controller *ctrl = data;
639
640	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
641
642	while (!kthread_should_stop()) {
643		/* poll for interrupt events or user requests */
644		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
645		       atomic_read(&ctrl->pending_events))
646			pciehp_ist(IRQ_NOTCONNECTED, ctrl);
647
648		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
649			pciehp_poll_time = 2; /* clamp to sane value */
650
651		schedule_timeout_idle(pciehp_poll_time * HZ);
652	}
653
654	return 0;
655}
656
657static void pcie_enable_notification(struct controller *ctrl)
658{
659	u16 cmd, mask;
660
661	/*
662	 * TBD: Power fault detected software notification support.
663	 *
664	 * Power fault detected software notification is not enabled
665	 * now, because it caused power fault detected interrupt storm
666	 * on some machines. On those machines, power fault detected
667	 * bit in the slot status register was set again immediately
668	 * when it is cleared in the interrupt service routine, and
669	 * next power fault detected interrupt was notified again.
670	 */
671
672	/*
673	 * Always enable link events: thus link-up and link-down shall
674	 * always be treated as hotplug and unplug respectively. Enable
675	 * presence detect only if Attention Button is not present.
676	 */
677	cmd = PCI_EXP_SLTCTL_DLLSCE;
678	if (ATTN_BUTTN(ctrl))
679		cmd |= PCI_EXP_SLTCTL_ABPE;
680	else
681		cmd |= PCI_EXP_SLTCTL_PDCE;
682	if (!pciehp_poll_mode)
683		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
684
685	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
686		PCI_EXP_SLTCTL_PFDE |
687		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
688		PCI_EXP_SLTCTL_DLLSCE);
689
690	pcie_write_cmd_nowait(ctrl, cmd, mask);
691	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
692		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
693}
694
695static void pcie_disable_notification(struct controller *ctrl)
696{
697	u16 mask;
698
699	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
700		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
701		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
702		PCI_EXP_SLTCTL_DLLSCE);
703	pcie_write_cmd(ctrl, 0, mask);
704	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
705		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
706}
707
708void pcie_clear_hotplug_events(struct controller *ctrl)
709{
710	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
711				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
712}
713
714void pcie_enable_interrupt(struct controller *ctrl)
715{
716	u16 mask;
717
718	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
719	pcie_write_cmd(ctrl, mask, mask);
720}
721
722void pcie_disable_interrupt(struct controller *ctrl)
723{
724	u16 mask;
725
726	/*
727	 * Mask hot-plug interrupt to prevent it triggering immediately
728	 * when the link goes inactive (we still get PME when any of the
729	 * enabled events is detected). Same goes with Link Layer State
730	 * changed event which generates PME immediately when the link goes
731	 * inactive so mask it as well.
732	 */
733	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
734	pcie_write_cmd(ctrl, 0, mask);
735}
736
737/*
738 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
739 * bus reset of the bridge, but at the same time we want to ensure that it is
740 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
741 * disable link state notification and presence detection change notification
742 * momentarily, if we see that they could interfere. Also, clear any spurious
743 * events after.
744 */
745int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
746{
747	struct controller *ctrl = to_ctrl(hotplug_slot);
748	struct pci_dev *pdev = ctrl_dev(ctrl);
749	u16 stat_mask = 0, ctrl_mask = 0;
750	int rc;
751
752	if (probe)
753		return 0;
754
755	down_write(&ctrl->reset_lock);
756
757	if (!ATTN_BUTTN(ctrl)) {
758		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
759		stat_mask |= PCI_EXP_SLTSTA_PDC;
760	}
761	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
762	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
763
764	pcie_write_cmd(ctrl, 0, ctrl_mask);
765	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
766		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
767
768	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
769
770	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
771	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
772	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
773		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
774
775	up_write(&ctrl->reset_lock);
776	return rc;
777}
778
779int pcie_init_notification(struct controller *ctrl)
780{
781	if (pciehp_request_irq(ctrl))
782		return -1;
783	pcie_enable_notification(ctrl);
784	ctrl->notification_enabled = 1;
785	return 0;
786}
787
788void pcie_shutdown_notification(struct controller *ctrl)
789{
790	if (ctrl->notification_enabled) {
791		pcie_disable_notification(ctrl);
792		pciehp_free_irq(ctrl);
793		ctrl->notification_enabled = 0;
794	}
795}
796
797static inline void dbg_ctrl(struct controller *ctrl)
798{
799	struct pci_dev *pdev = ctrl->pcie->port;
800	u16 reg16;
801
802	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
803	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
804	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
805	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
806	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
807}
808
809#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
810
811struct controller *pcie_init(struct pcie_device *dev)
812{
813	struct controller *ctrl;
814	u32 slot_cap, link_cap;
815	u8 poweron;
816	struct pci_dev *pdev = dev->port;
817	struct pci_bus *subordinate = pdev->subordinate;
818
819	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
820	if (!ctrl)
821		return NULL;
822
823	ctrl->pcie = dev;
824	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
825
826	if (pdev->hotplug_user_indicators)
827		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
828
829	/*
830	 * We assume no Thunderbolt controllers support Command Complete events,
831	 * but some controllers falsely claim they do.
832	 */
833	if (pdev->is_thunderbolt)
834		slot_cap |= PCI_EXP_SLTCAP_NCCS;
835
836	ctrl->slot_cap = slot_cap;
837	mutex_init(&ctrl->ctrl_lock);
838	mutex_init(&ctrl->state_lock);
839	init_rwsem(&ctrl->reset_lock);
840	init_waitqueue_head(&ctrl->requester);
841	init_waitqueue_head(&ctrl->queue);
842	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
843	dbg_ctrl(ctrl);
844
845	down_read(&pci_bus_sem);
846	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
847	up_read(&pci_bus_sem);
848
849	/* Check if Data Link Layer Link Active Reporting is implemented */
850	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
851
852	/* Clear all remaining event bits in Slot Status register. */
853	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
854		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
855		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
856		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
857
858	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
859		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
860		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
861		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
862		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
863		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
864		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
865		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
866		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
867		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
868		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
869		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
870		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
871
872	/*
873	 * If empty slot's power status is on, turn power off.  The IRQ isn't
874	 * requested yet, so avoid triggering a notification with this command.
875	 */
876	if (POWER_CTRL(ctrl)) {
877		pciehp_get_power_status(ctrl, &poweron);
878		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
879			pcie_disable_notification(ctrl);
880			pciehp_power_off_slot(ctrl);
881		}
882	}
883
884	return ctrl;
885}
886
887void pciehp_release_ctrl(struct controller *ctrl)
888{
889	cancel_delayed_work_sync(&ctrl->button_work);
890	kfree(ctrl);
891}
892
893static void quirk_cmd_compl(struct pci_dev *pdev)
894{
895	u32 slot_cap;
896
897	if (pci_is_pcie(pdev)) {
898		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
899		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
900		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
901			pdev->broken_cmd_compl = 1;
902	}
903}
904DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
905			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
906DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
907			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
908DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
909			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
910DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
911			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
v6.2 (drivers/pci/hotplug/pciehp_hpc.c)
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCI Express PCI Hot Plug Driver
   4 *
   5 * Copyright (C) 1995,2001 Compaq Computer Corporation
   6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
   7 * Copyright (C) 2001 IBM Corp.
   8 * Copyright (C) 2003-2004 Intel Corporation
   9 *
  10 * All rights reserved.
  11 *
  12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
  13 */
  14
  15#define dev_fmt(fmt) "pciehp: " fmt
  16
  17#include <linux/dmi.h>
  18#include <linux/kernel.h>
  19#include <linux/types.h>
  20#include <linux/jiffies.h>
  21#include <linux/kthread.h>
  22#include <linux/pci.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/interrupt.h>
  25#include <linux/slab.h>
  26
  27#include "../pci.h"
  28#include "pciehp.h"
  29
  30static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
  31	/*
  32	 * Match all Dell systems, as some Dell systems have inband
  33	 * presence disabled on NVMe slots (but don't support the bit to
  34	 * report it). Setting inband presence disabled should have no
  35	 * negative effect, except on broken hotplug slots that never
  36	 * assert presence detect--and those will still work, they will
  37	 * just have a bit of extra delay before being probed.
  38	 */
  39	{
  40		.ident = "Dell System",
  41		.matches = {
  42			DMI_MATCH(DMI_OEM_STRING, "Dell System"),
  43		},
  44	},
  45	{}
  46};
  47
  48static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
  49{
  50	return ctrl->pcie->port;
  51}
  52
  53static irqreturn_t pciehp_isr(int irq, void *dev_id);
  54static irqreturn_t pciehp_ist(int irq, void *dev_id);
  55static int pciehp_poll(void *data);
  56
  57static inline int pciehp_request_irq(struct controller *ctrl)
  58{
  59	int retval, irq = ctrl->pcie->irq;
  60
  61	if (pciehp_poll_mode) {
  62		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
  63						"pciehp_poll-%s",
  64						slot_name(ctrl));
  65		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
  66	}
  67
  68	/* Installs the interrupt handler */
  69	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
  70				      IRQF_SHARED, "pciehp", ctrl);
  71	if (retval)
  72		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
  73			 irq);
  74	return retval;
  75}
  76
  77static inline void pciehp_free_irq(struct controller *ctrl)
  78{
  79	if (pciehp_poll_mode)
  80		kthread_stop(ctrl->poll_thread);
  81	else
  82		free_irq(ctrl->pcie->irq, ctrl);
  83}
  84
  85static int pcie_poll_cmd(struct controller *ctrl, int timeout)
  86{
  87	struct pci_dev *pdev = ctrl_dev(ctrl);
  88	u16 slot_status;
  89
  90	do {
  91		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  92		if (PCI_POSSIBLE_ERROR(slot_status)) {
  93			ctrl_info(ctrl, "%s: no response from device\n",
  94				  __func__);
  95			return 0;
  96		}
  97
  98		if (slot_status & PCI_EXP_SLTSTA_CC) {
  99			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 100						   PCI_EXP_SLTSTA_CC);
 101			ctrl->cmd_busy = 0;
 102			smp_mb();
 103			return 1;
 104		}
 105		msleep(10);
 106		timeout -= 10;
 107	} while (timeout >= 0);
 108	return 0;	/* timeout */
 109}
 110
 111static void pcie_wait_cmd(struct controller *ctrl)
 112{
 113	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 114	unsigned long duration = msecs_to_jiffies(msecs);
 115	unsigned long cmd_timeout = ctrl->cmd_started + duration;
 116	unsigned long now, timeout;
 117	int rc;
 118
 119	/*
 120	 * If the controller does not generate notifications for command
 121	 * completions, we never need to wait between writes.
 122	 */
 123	if (NO_CMD_CMPL(ctrl))
 124		return;
 125
 126	if (!ctrl->cmd_busy)
 127		return;
 128
 129	/*
 130	 * Even if the command has already timed out, we want to call
 131	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
 132	 */
 133	now = jiffies;
 134	if (time_before_eq(cmd_timeout, now))
 135		timeout = 1;
 136	else
 137		timeout = cmd_timeout - now;
 138
 139	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
 140	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
 141		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
 142	else
 143		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 144
 145	if (!rc)
 146		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
 147			  ctrl->slot_ctrl,
 148			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
 149}
 150
 151#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
 152				 PCI_EXP_SLTCTL_PIC |	\
 153				 PCI_EXP_SLTCTL_AIC |	\
 154				 PCI_EXP_SLTCTL_EIC)
 155
 156static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 157			      u16 mask, bool wait)
 158{
 159	struct pci_dev *pdev = ctrl_dev(ctrl);
 160	u16 slot_ctrl_orig, slot_ctrl;
 161
 162	mutex_lock(&ctrl->ctrl_lock);
 163
 164	/*
 165	 * Always wait for any previous command that might still be in progress
 166	 */
 167	pcie_wait_cmd(ctrl);
 168
 169	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 170	if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
 171		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 172		goto out;
 173	}
 174
 175	slot_ctrl_orig = slot_ctrl;
 176	slot_ctrl &= ~mask;
 177	slot_ctrl |= (cmd & mask);
 178	ctrl->cmd_busy = 1;
 179	smp_mb();
 180	ctrl->slot_ctrl = slot_ctrl;
 181	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
 182	ctrl->cmd_started = jiffies;
 183
 184	/*
 185	 * Controllers with the Intel CF118 and similar errata advertise
 186	 * Command Completed support, but they only set Command Completed
 187	 * if we change the "Control" bits for power, power indicator,
 188	 * attention indicator, or interlock.  If we only change the
 189	 * "Enable" bits, they never set the Command Completed bit.
 190	 */
 191	if (pdev->broken_cmd_compl &&
 192	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
 193		ctrl->cmd_busy = 0;
 194
 195	/*
 196	 * Optionally wait for the hardware to be ready for a new command,
 197	 * indicating completion of the above issued command.
 198	 */
 199	if (wait)
 200		pcie_wait_cmd(ctrl);
 201
 202out:
 203	mutex_unlock(&ctrl->ctrl_lock);
 204}
 205
 206/**
 207 * pcie_write_cmd - Issue controller command
 208 * @ctrl: controller to which the command is issued
 209 * @cmd:  command value written to slot control register
 210 * @mask: bitmask of slot control register to be modified
 211 */
 212static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 213{
 214	pcie_do_write_cmd(ctrl, cmd, mask, true);
 215}
 216
 217/* Same as above without waiting for the hardware to latch */
 218static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
 219{
 220	pcie_do_write_cmd(ctrl, cmd, mask, false);
 221}
 222
 223/**
 224 * pciehp_check_link_active() - Is the link active
 225 * @ctrl: PCIe hotplug controller
 226 *
 227 * Check whether the downstream link is currently active. Note it is
 228 * possible that the card is removed immediately after this so the
 229 * caller may need to take it into account.
 230 *
 231 * If the hotplug controller itself is not available anymore returns
 232 * %-ENODEV.
 233 */
 234int pciehp_check_link_active(struct controller *ctrl)
 235{
 236	struct pci_dev *pdev = ctrl_dev(ctrl);
 237	u16 lnk_status;
 238	int ret;
 239
 240	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 241	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
 242		return -ENODEV;
 243
 244	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
 245	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 246
 247	return ret;
 248}
 249
 250static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
 251{
 252	u32 l;
 253	int count = 0;
 254	int delay = 1000, step = 20;
 255	bool found = false;
 256
 257	do {
 258		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
 259		count++;
 260
 261		if (found)
 262			break;
 263
 264		msleep(step);
 265		delay -= step;
 266	} while (delay > 0);
 267
 268	if (count > 1)
 269		pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
 270			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
 271			PCI_FUNC(devfn), count, step, l);
 272
 273	return found;
 274}
 275
 276static void pcie_wait_for_presence(struct pci_dev *pdev)
 277{
 278	int timeout = 1250;
 279	u16 slot_status;
 280
 281	do {
 282		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 283		if (slot_status & PCI_EXP_SLTSTA_PDS)
 284			return;
 285		msleep(10);
 286		timeout -= 10;
 287	} while (timeout > 0);
 288}
 289
 290int pciehp_check_link_status(struct controller *ctrl)
 291{
 292	struct pci_dev *pdev = ctrl_dev(ctrl);
 293	bool found;
 294	u16 lnk_status;
 295
 296	if (!pcie_wait_for_link(pdev, true)) {
 297		ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
 298		return -1;
 299	}
 300
 301	if (ctrl->inband_presence_disabled)
 302		pcie_wait_for_presence(pdev);
 303
 304	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
 305					PCI_DEVFN(0, 0));
 306
 307	/* ignore link or presence changes up to this point */
 308	if (found)
 309		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
 310			   &ctrl->pending_events);
 311
 312	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 313	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 314	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
 315	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
 316		ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
 317			  slot_name(ctrl), lnk_status);
 318		return -1;
 319	}
 320
 321	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
 322
 323	if (!found) {
 324		ctrl_info(ctrl, "Slot(%s): No device found\n",
 325			  slot_name(ctrl));
 326		return -1;
 327	}
 328
 329	return 0;
 330}
 331
 332static int __pciehp_link_set(struct controller *ctrl, bool enable)
 333{
 334	struct pci_dev *pdev = ctrl_dev(ctrl);
 335	u16 lnk_ctrl;
 336
 337	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
 338
 339	if (enable)
 340		lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
 341	else
 342		lnk_ctrl |= PCI_EXP_LNKCTL_LD;
 343
 344	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
 345	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
 346	return 0;
 347}
 348
 349static int pciehp_link_enable(struct controller *ctrl)
 350{
 351	return __pciehp_link_set(ctrl, true);
 352}
 353
 354int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 355				    u8 *status)
 356{
 357	struct controller *ctrl = to_ctrl(hotplug_slot);
 358	struct pci_dev *pdev = ctrl_dev(ctrl);
 359	u16 slot_ctrl;
 360
 361	pci_config_pm_runtime_get(pdev);
 362	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 363	pci_config_pm_runtime_put(pdev);
 364	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
 365	return 0;
 366}
 367
 368int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
 369{
 370	struct controller *ctrl = to_ctrl(hotplug_slot);
 371	struct pci_dev *pdev = ctrl_dev(ctrl);
 372	u16 slot_ctrl;
 373
 374	pci_config_pm_runtime_get(pdev);
 375	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 376	pci_config_pm_runtime_put(pdev);
 377	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
 378		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 379
 380	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
 381	case PCI_EXP_SLTCTL_ATTN_IND_ON:
 382		*status = 1;	/* On */
 383		break;
 384	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
 385		*status = 2;	/* Blink */
 386		break;
 387	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
 388		*status = 0;	/* Off */
 389		break;
 390	default:
 391		*status = 0xFF;
 392		break;
 393	}
 394
 395	return 0;
 396}
 397
 398void pciehp_get_power_status(struct controller *ctrl, u8 *status)
 399{
 400	struct pci_dev *pdev = ctrl_dev(ctrl);
 401	u16 slot_ctrl;
 402
 403	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 404	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
 405		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 406
 407	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
 408	case PCI_EXP_SLTCTL_PWR_ON:
 409		*status = 1;	/* On */
 410		break;
 411	case PCI_EXP_SLTCTL_PWR_OFF:
 412		*status = 0;	/* Off */
 413		break;
 414	default:
 415		*status = 0xFF;
 416		break;
 417	}
 418}
 419
 420void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
 421{
 422	struct pci_dev *pdev = ctrl_dev(ctrl);
 423	u16 slot_status;
 424
 425	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 426	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 427}
 428
 429/**
 430 * pciehp_card_present() - Is the card present
 431 * @ctrl: PCIe hotplug controller
 432 *
 433 * Function checks whether the card is currently present in the slot and
 434 * in that case returns true. Note it is possible that the card is
 435 * removed immediately after the check so the caller may need to take
 436 * this into account.
 437 *
  438 * If the hotplug controller itself is not available anymore returns
 439 * %-ENODEV.
 440 */
 441int pciehp_card_present(struct controller *ctrl)
 442{
 443	struct pci_dev *pdev = ctrl_dev(ctrl);
 444	u16 slot_status;
 445	int ret;
 446
 447	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 448	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
 449		return -ENODEV;
 450
 451	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
 452}
 453
 454/**
 455 * pciehp_card_present_or_link_active() - whether given slot is occupied
 456 * @ctrl: PCIe hotplug controller
 457 *
 458 * Unlike pciehp_card_present(), which determines presence solely from the
 459 * Presence Detect State bit, this helper also returns true if the Link Active
 460 * bit is set.  This is a concession to broken hotplug ports which hardwire
 461 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
 462 *
 463 * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
 464 *	    port is not present anymore returns %-ENODEV.
 465 */
 466int pciehp_card_present_or_link_active(struct controller *ctrl)
 467{
 468	int ret;
 469
 470	ret = pciehp_card_present(ctrl);
 471	if (ret)
 472		return ret;
 473
 474	return pciehp_check_link_active(ctrl);
 475}
 476
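/*
 * Illustrative sketch (added by the editor, not part of pciehp_hpc.c):
 * with the int return used in this version, a caller has to separate
 * "slot empty" (0) from "hotplug port itself is gone" (-ENODEV) rather
 * than treating the result as a plain bool; the helper below is
 * hypothetical.
 */
static bool example_slot_occupied(struct controller *ctrl)
{
	int ret = pciehp_card_present_or_link_active(ctrl);

	/* Negative means the controller vanished, not that the slot is
	 * empty; only a positive return means "occupied". */
	return ret > 0;
}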
 477int pciehp_query_power_fault(struct controller *ctrl)
 478{
 479	struct pci_dev *pdev = ctrl_dev(ctrl);
 480	u16 slot_status;
 481
 482	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 483	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
 484}
 485
 486int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 487				    u8 status)
 488{
 489	struct controller *ctrl = to_ctrl(hotplug_slot);
 490	struct pci_dev *pdev = ctrl_dev(ctrl);
 491
 492	pci_config_pm_runtime_get(pdev);
 493	pcie_write_cmd_nowait(ctrl, status << 6,
 494			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
 495	pci_config_pm_runtime_put(pdev);
 496	return 0;
 497}
 498
 499/**
 500 * pciehp_set_indicators() - set attention indicator, power indicator, or both
 501 * @ctrl: PCIe hotplug controller
 502 * @pwr: one of:
 503 *	PCI_EXP_SLTCTL_PWR_IND_ON
 504 *	PCI_EXP_SLTCTL_PWR_IND_BLINK
 505 *	PCI_EXP_SLTCTL_PWR_IND_OFF
 506 * @attn: one of:
 507 *	PCI_EXP_SLTCTL_ATTN_IND_ON
 508 *	PCI_EXP_SLTCTL_ATTN_IND_BLINK
 509 *	PCI_EXP_SLTCTL_ATTN_IND_OFF
 510 *
 511 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
 512 * unchanged.
 513 */
 514void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
 515{
 516	u16 cmd = 0, mask = 0;
 517
 518	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
 519		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
 520		mask |= PCI_EXP_SLTCTL_PIC;
 521	}
 522
 523	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
 524		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
 525		mask |= PCI_EXP_SLTCTL_AIC;
 526	}
 527
 528	if (cmd) {
 529		pcie_write_cmd_nowait(ctrl, cmd, mask);
 530		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 531			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 532	}
 533}
 534
 535int pciehp_power_on_slot(struct controller *ctrl)
 536{
 537	struct pci_dev *pdev = ctrl_dev(ctrl);
 538	u16 slot_status;
 539	int retval;
 540
 541	/* Clear power-fault bit from previous power failures */
 542	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 543	if (slot_status & PCI_EXP_SLTSTA_PFD)
 544		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 545					   PCI_EXP_SLTSTA_PFD);
 546	ctrl->power_fault_detected = 0;
 547
 548	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
 549	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 550		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 551		 PCI_EXP_SLTCTL_PWR_ON);
 552
 553	retval = pciehp_link_enable(ctrl);
 554	if (retval)
 555		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
 556
 557	return retval;
 558}
 559
 560void pciehp_power_off_slot(struct controller *ctrl)
 561{
 562	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
 563	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 564		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 565		 PCI_EXP_SLTCTL_PWR_OFF);
 566}
 567
 568static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
 569					  struct pci_dev *pdev, int irq)
 570{
 571	/*
 572	 * Ignore link changes which occurred while waiting for DPC recovery.
 573	 * Could be several if DPC triggered multiple times consecutively.
 574	 */
 575	synchronize_hardirq(irq);
 576	atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
 577	if (pciehp_poll_mode)
 578		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 579					   PCI_EXP_SLTSTA_DLLSC);
 580	ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
 581		  slot_name(ctrl));
 582
 583	/*
 584	 * If the link is unexpectedly down after successful recovery,
 585	 * the corresponding link change may have been ignored above.
 586	 * Synthesize it to ensure that it is acted on.
 587	 */
 588	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 589	if (!pciehp_check_link_active(ctrl))
 590		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 591	up_read(&ctrl->reset_lock);
 592}
 593
 594static irqreturn_t pciehp_isr(int irq, void *dev_id)
 595{
 596	struct controller *ctrl = (struct controller *)dev_id;
 597	struct pci_dev *pdev = ctrl_dev(ctrl);
 598	struct device *parent = pdev->dev.parent;
 599	u16 status, events = 0;
 600
 601	/*
 602	 * Interrupts only occur in D3hot or shallower and only if enabled
 603	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
 604	 */
 605	if (pdev->current_state == PCI_D3cold ||
 606	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
 607		return IRQ_NONE;
 608
 609	/*
 610	 * Keep the port accessible by holding a runtime PM ref on its parent.
 611	 * Defer resume of the parent to the IRQ thread if it's suspended.
 612	 * Mask the interrupt until then.
 613	 */
 614	if (parent) {
 615		pm_runtime_get_noresume(parent);
 616		if (!pm_runtime_active(parent)) {
 617			pm_runtime_put(parent);
 618			disable_irq_nosync(irq);
 619			atomic_or(RERUN_ISR, &ctrl->pending_events);
 620			return IRQ_WAKE_THREAD;
 621		}
 622	}
 623
 624read_status:
 625	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
 626	if (PCI_POSSIBLE_ERROR(status)) {
 627		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 628		if (parent)
 629			pm_runtime_put(parent);
 630		return IRQ_NONE;
 631	}
 632
 633	/*
 634	 * Slot Status contains plain status bits as well as event
 635	 * notification bits; right now we only want the event bits.
 636	 */
 637	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 638		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
 639		  PCI_EXP_SLTSTA_DLLSC;
 640
 641	/*
 642	 * If we've already reported a power fault, don't report it again
 643	 * until we've done something to handle it.
 644	 */
 645	if (ctrl->power_fault_detected)
 646		status &= ~PCI_EXP_SLTSTA_PFD;
 647	else if (status & PCI_EXP_SLTSTA_PFD)
 648		ctrl->power_fault_detected = true;
 649
 650	events |= status;
 651	if (!events) {
 652		if (parent)
 653			pm_runtime_put(parent);
 654		return IRQ_NONE;
 655	}
 656
 657	if (status) {
 658		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
 659
 660		/*
 661		 * In MSI mode, all event bits must be zero before the port
 662		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
 663		 * So re-read the Slot Status register in case a bit was set
 664		 * between read and write.
 665		 */
 666		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
 667			goto read_status;
 668	}
 669
 670	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
 671	if (parent)
 672		pm_runtime_put(parent);
 673
 674	/*
 675	 * Command Completed notifications are not deferred to the
 676	 * IRQ thread because it may be waiting for their arrival.
 677	 */
 678	if (events & PCI_EXP_SLTSTA_CC) {
 679		ctrl->cmd_busy = 0;
 680		smp_mb();
 681		wake_up(&ctrl->queue);
 682
 683		if (events == PCI_EXP_SLTSTA_CC)
 684			return IRQ_HANDLED;
 685
 686		events &= ~PCI_EXP_SLTSTA_CC;
 687	}
 688
 689	if (pdev->ignore_hotplug) {
 690		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
 691		return IRQ_HANDLED;
 692	}
 693
 694	/* Save pending events for consumption by IRQ thread. */
 695	atomic_or(events, &ctrl->pending_events);
 696	return IRQ_WAKE_THREAD;
 697}
 698
 699static irqreturn_t pciehp_ist(int irq, void *dev_id)
 700{
 701	struct controller *ctrl = (struct controller *)dev_id;
 702	struct pci_dev *pdev = ctrl_dev(ctrl);
 703	irqreturn_t ret;
 704	u32 events;
 705
 706	ctrl->ist_running = true;
 707	pci_config_pm_runtime_get(pdev);
 708
 709	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
 710	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
 711		ret = pciehp_isr(irq, dev_id);
 712		enable_irq(irq);
 713		if (ret != IRQ_WAKE_THREAD)
 714			goto out;
 715	}
 716
 717	synchronize_hardirq(irq);
 718	events = atomic_xchg(&ctrl->pending_events, 0);
 719	if (!events) {
 720		ret = IRQ_NONE;
 721		goto out;
 722	}
 723
 724	/* Check Attention Button Pressed */
 725	if (events & PCI_EXP_SLTSTA_ABP) {
 726		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
 727			  slot_name(ctrl));
 728		pciehp_handle_button_press(ctrl);
 729	}
 730
 731	/* Check Power Fault Detected */
 732	if (events & PCI_EXP_SLTSTA_PFD) {
 733		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
 734		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
 735				      PCI_EXP_SLTCTL_ATTN_IND_ON);
 736	}
 737
 738	/*
 739	 * Ignore Link Down/Up events caused by Downstream Port Containment
 740	 * if recovery from the error succeeded.
 741	 */
 742	if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
 743	    ctrl->state == ON_STATE) {
 744		events &= ~PCI_EXP_SLTSTA_DLLSC;
 745		pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
 746	}
 747
 748	/*
 749	 * Disable requests have higher priority than Presence Detect Changed
 750	 * or Data Link Layer State Changed events.
 751	 */
 752	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 753	if (events & DISABLE_SLOT)
 754		pciehp_handle_disable_request(ctrl);
 755	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
 756		pciehp_handle_presence_or_link_change(ctrl, events);
 757	up_read(&ctrl->reset_lock);
 758
 759	ret = IRQ_HANDLED;
 760out:
 761	pci_config_pm_runtime_put(pdev);
 762	ctrl->ist_running = false;
 763	wake_up(&ctrl->requester);
 764	return ret;
 765}
 766
 767static int pciehp_poll(void *data)
 768{
 769	struct controller *ctrl = data;
 770
 771	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
 772
 773	while (!kthread_should_stop()) {
 774		/* poll for interrupt events or user requests */
 775		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
 776		       atomic_read(&ctrl->pending_events))
 777			pciehp_ist(IRQ_NOTCONNECTED, ctrl);
 778
 779		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
 780			pciehp_poll_time = 2; /* clamp to sane value */
 781
 782		schedule_timeout_idle(pciehp_poll_time * HZ);
 783	}
 784
 785	return 0;
 786}
 787
 788static void pcie_enable_notification(struct controller *ctrl)
 789{
 790	u16 cmd, mask;
 791
 792	/*
 793	 * TBD: Power fault detected software notification support.
 794	 *
 795	 * Power fault detected software notification is not enabled
 796	 * now, because it caused power fault detected interrupt storm
 797	 * on some machines. On those machines, power fault detected
 798	 * bit in the slot status register was set again immediately
 799	 * when it is cleared in the interrupt service routine, and
 800	 * next power fault detected interrupt was notified again.
 801	 */
 802
 803	/*
 804	 * Always enable link events: thus link-up and link-down shall
 805	 * always be treated as hotplug and unplug respectively. Enable
 806	 * presence detect only if Attention Button is not present.
 807	 */
 808	cmd = PCI_EXP_SLTCTL_DLLSCE;
 809	if (ATTN_BUTTN(ctrl))
 810		cmd |= PCI_EXP_SLTCTL_ABPE;
 811	else
 812		cmd |= PCI_EXP_SLTCTL_PDCE;
 813	if (!pciehp_poll_mode)
 814		cmd |= PCI_EXP_SLTCTL_HPIE;
 815	if (!pciehp_poll_mode && !NO_CMD_CMPL(ctrl))
 816		cmd |= PCI_EXP_SLTCTL_CCIE;
 817
 818	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 819		PCI_EXP_SLTCTL_PFDE |
 820		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 821		PCI_EXP_SLTCTL_DLLSCE);
 822
 823	pcie_write_cmd_nowait(ctrl, cmd, mask);
 824	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 825		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 826}
 827
 828static void pcie_disable_notification(struct controller *ctrl)
 829{
 830	u16 mask;
 831
 832	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 833		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
 834		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 835		PCI_EXP_SLTCTL_DLLSCE);
 836	pcie_write_cmd(ctrl, 0, mask);
 837	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 838		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 839}
 840
 841void pcie_clear_hotplug_events(struct controller *ctrl)
 842{
 843	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
 844				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
 845}
 846
 847void pcie_enable_interrupt(struct controller *ctrl)
 848{
 849	u16 mask;
 850
 851	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 852	pcie_write_cmd(ctrl, mask, mask);
 853}
 854
 855void pcie_disable_interrupt(struct controller *ctrl)
 856{
 857	u16 mask;
 858
 859	/*
 860	 * Mask hot-plug interrupt to prevent it triggering immediately
 861	 * when the link goes inactive (we still get PME when any of the
 862	 * enabled events is detected). Same goes with Link Layer State
 863	 * changed event which generates PME immediately when the link goes
 864	 * inactive so mask it as well.
 865	 */
 866	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 867	pcie_write_cmd(ctrl, 0, mask);
 868}
 869
 870/**
 871 * pciehp_slot_reset() - ignore link event caused by error-induced hot reset
 872 * @dev: PCI Express port service device
 873 *
 874 * Called from pcie_portdrv_slot_reset() after AER or DPC initiated a reset
 875 * further up in the hierarchy to recover from an error.  The reset was
 876 * propagated down to this hotplug port.  Ignore the resulting link flap.
 877 * If the link failed to retrain successfully, synthesize the ignored event.
 878 * Surprise removal during reset is detected through Presence Detect Changed.
 879 */
 880int pciehp_slot_reset(struct pcie_device *dev)
 881{
 882	struct controller *ctrl = get_service_data(dev);
 883
 884	if (ctrl->state != ON_STATE)
 885		return 0;
 886
 887	pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA,
 888				   PCI_EXP_SLTSTA_DLLSC);
 889
 890	if (!pciehp_check_link_active(ctrl))
 891		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 892
 893	return 0;
 894}
 895
 896/*
 897 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 898 * bus reset of the bridge, but at the same time we want to ensure that it is
 899 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 900 * disable link state notification and presence detection change notification
 901 * momentarily, if we see that they could interfere. Also, clear any spurious
 902 * events after.
 903 */
 904int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
 905{
 906	struct controller *ctrl = to_ctrl(hotplug_slot);
 907	struct pci_dev *pdev = ctrl_dev(ctrl);
 908	u16 stat_mask = 0, ctrl_mask = 0;
 909	int rc;
 910
 911	if (probe)
 912		return 0;
 913
 914	down_write_nested(&ctrl->reset_lock, ctrl->depth);
 915
 916	if (!ATTN_BUTTN(ctrl)) {
 917		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
 918		stat_mask |= PCI_EXP_SLTSTA_PDC;
 919	}
 920	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
 921	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
 922
 923	pcie_write_cmd(ctrl, 0, ctrl_mask);
 924	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 925		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 926
 927	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
 928
 929	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
 930	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
 931	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 932		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
 933
 934	up_write(&ctrl->reset_lock);
 935	return rc;
 936}
 937
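/*
 * Illustrative sketch (added by the editor, not part of this file): to
 * the best of our knowledge pciehp_core.c hooks pciehp_reset_slot() up
 * as the ->reset_slot() callback of its struct hotplug_slot_ops, so a
 * secondary bus reset requested through the PCI core's reset machinery
 * ends up here; the ops variable below is hypothetical.
 */
static const struct hotplug_slot_ops example_slot_ops = {
	.reset_slot	= pciehp_reset_slot,
	/* ... remaining pciehp callbacks ... */
};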
 938int pcie_init_notification(struct controller *ctrl)
 939{
 940	if (pciehp_request_irq(ctrl))
 941		return -1;
 942	pcie_enable_notification(ctrl);
 943	ctrl->notification_enabled = 1;
 944	return 0;
 945}
 946
 947void pcie_shutdown_notification(struct controller *ctrl)
 948{
 949	if (ctrl->notification_enabled) {
 950		pcie_disable_notification(ctrl);
 951		pciehp_free_irq(ctrl);
 952		ctrl->notification_enabled = 0;
 953	}
 954}
 955
 956static inline void dbg_ctrl(struct controller *ctrl)
 957{
 958	struct pci_dev *pdev = ctrl->pcie->port;
 959	u16 reg16;
 960
 961	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
 962	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
 963	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
 964	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
 965	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
 966}
 967
 968#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
 969
 970static inline int pcie_hotplug_depth(struct pci_dev *dev)
 971{
 972	struct pci_bus *bus = dev->bus;
 973	int depth = 0;
 974
 975	while (bus->parent) {
 976		bus = bus->parent;
 977		if (bus->self && bus->self->is_hotplug_bridge)
 978			depth++;
 979	}
 980
 981	return depth;
 982}
 983
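/*
 * Illustrative sketch (added by the editor, not part of pciehp_hpc.c):
 * the depth computed above is used purely as a lockdep subclass, so
 * that reset_lock taken on a hotplug port nested below another hotplug
 * port (e.g. in a Thunderbolt daisy chain) is not reported as a
 * recursive deadlock; the helper name below is hypothetical.
 */
static void example_nested_reset_lock(struct controller *ctrl)
{
	/* Deeper hotplug bridges use a higher lockdep nesting level. */
	down_read_nested(&ctrl->reset_lock, ctrl->depth);
	/* ... handle the hotplug event ... */
	up_read(&ctrl->reset_lock);
}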
 984struct controller *pcie_init(struct pcie_device *dev)
 985{
 986	struct controller *ctrl;
 987	u32 slot_cap, slot_cap2, link_cap;
 988	u8 poweron;
 989	struct pci_dev *pdev = dev->port;
 990	struct pci_bus *subordinate = pdev->subordinate;
 991
 992	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 993	if (!ctrl)
 994		return NULL;
 995
 996	ctrl->pcie = dev;
 997	ctrl->depth = pcie_hotplug_depth(dev->port);
 998	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
 999
1000	if (pdev->hotplug_user_indicators)
1001		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
1002
1003	/*
1004	 * We assume no Thunderbolt controllers support Command Complete events,
1005	 * but some controllers falsely claim they do.
1006	 */
1007	if (pdev->is_thunderbolt)
1008		slot_cap |= PCI_EXP_SLTCAP_NCCS;
1009
1010	ctrl->slot_cap = slot_cap;
1011	mutex_init(&ctrl->ctrl_lock);
1012	mutex_init(&ctrl->state_lock);
1013	init_rwsem(&ctrl->reset_lock);
1014	init_waitqueue_head(&ctrl->requester);
1015	init_waitqueue_head(&ctrl->queue);
1016	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
1017	dbg_ctrl(ctrl);
1018
1019	down_read(&pci_bus_sem);
1020	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
1021	up_read(&pci_bus_sem);
1022
1023	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
1024	if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
1025		pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
1026				      PCI_EXP_SLTCTL_IBPD_DISABLE);
1027		ctrl->inband_presence_disabled = 1;
1028	}
1029
1030	if (dmi_first_match(inband_presence_disabled_dmi_table))
1031		ctrl->inband_presence_disabled = 1;
1032
1033	/* Check if Data Link Layer Link Active Reporting is implemented */
1034	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
1035
1036	/* Clear all remaining event bits in Slot Status register. */
1037	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
1038		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
1039		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
1040		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
1041
1042	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
1043		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
1044		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
1045		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
1046		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
1047		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
1048		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
1049		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
1050		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
1051		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
1052		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
1053		FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
1054		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
1055		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
1056
1057	/*
1058	 * If empty slot's power status is on, turn power off.  The IRQ isn't
1059	 * requested yet, so avoid triggering a notification with this command.
1060	 */
1061	if (POWER_CTRL(ctrl)) {
1062		pciehp_get_power_status(ctrl, &poweron);
1063		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
1064			pcie_disable_notification(ctrl);
1065			pciehp_power_off_slot(ctrl);
1066		}
1067	}
1068
1069	return ctrl;
1070}
1071
1072void pciehp_release_ctrl(struct controller *ctrl)
1073{
1074	cancel_delayed_work_sync(&ctrl->button_work);
1075	kfree(ctrl);
1076}
1077
1078static void quirk_cmd_compl(struct pci_dev *pdev)
1079{
1080	u32 slot_cap;
1081
1082	if (pci_is_pcie(pdev)) {
1083		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
1084		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
1085		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
1086			pdev->broken_cmd_compl = 1;
1087	}
1088}
1089DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1090			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1091DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
1092			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1093DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
1094			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1095DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
1096			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1097DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
1098			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);