v5.4
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * PCI Express PCI Hot Plug Driver
  4 *
  5 * Copyright (C) 1995,2001 Compaq Computer Corporation
  6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
  7 * Copyright (C) 2001 IBM Corp.
  8 * Copyright (C) 2003-2004 Intel Corporation
  9 *
 10 * All rights reserved.
 11 *
 12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
 13 */
 14
 15#define dev_fmt(fmt) "pciehp: " fmt
 16
 17#include <linux/kernel.h>
 18#include <linux/types.h>
 19#include <linux/jiffies.h>
 20#include <linux/kthread.h>
 21#include <linux/pci.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/interrupt.h>
 24#include <linux/slab.h>
 25
 26#include "../pci.h"
 27#include "pciehp.h"
 28
 29static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
 30{
 31	return ctrl->pcie->port;
 32}
 33
 34static irqreturn_t pciehp_isr(int irq, void *dev_id);
 35static irqreturn_t pciehp_ist(int irq, void *dev_id);
 36static int pciehp_poll(void *data);
 37
 38static inline int pciehp_request_irq(struct controller *ctrl)
 39{
 40	int retval, irq = ctrl->pcie->irq;
 41
 42	if (pciehp_poll_mode) {
 43		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
 44						"pciehp_poll-%s",
 45						slot_name(ctrl));
 46		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
 47	}
 48
 49	/* Installs the interrupt handler */
 50	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
 51				      IRQF_SHARED, "pciehp", ctrl);
 52	if (retval)
 53		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
 54			 irq);
 55	return retval;
 56}
 57
 58static inline void pciehp_free_irq(struct controller *ctrl)
 59{
 60	if (pciehp_poll_mode)
 61		kthread_stop(ctrl->poll_thread);
 62	else
 63		free_irq(ctrl->pcie->irq, ctrl);
 64}
 65
 66static int pcie_poll_cmd(struct controller *ctrl, int timeout)
 67{
 68	struct pci_dev *pdev = ctrl_dev(ctrl);
 69	u16 slot_status;
 70
 71	while (true) {
 72		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 73		if (slot_status == (u16) ~0) {
 74			ctrl_info(ctrl, "%s: no response from device\n",
 75				  __func__);
 76			return 0;
 77		}
 78
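   		/*
   		 * Slot Status event bits are RW1C: the write below clears
   		 * Command Completed by writing the bit back to the register.
   		 */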
 79		if (slot_status & PCI_EXP_SLTSTA_CC) {
 80			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 81						   PCI_EXP_SLTSTA_CC);
 82			return 1;
 83		}
 84		if (timeout < 0)
 85			break;
 86		msleep(10);
 87		timeout -= 10;
 88	}
 89	return 0;	/* timeout */
 90}
 91
 92static void pcie_wait_cmd(struct controller *ctrl)
 93{
 94	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 95	unsigned long duration = msecs_to_jiffies(msecs);
 96	unsigned long cmd_timeout = ctrl->cmd_started + duration;
 97	unsigned long now, timeout;
 98	int rc;
 99
100	/*
101	 * If the controller does not generate notifications for command
102	 * completions, we never need to wait between writes.
103	 */
104	if (NO_CMD_CMPL(ctrl))
105		return;
106
107	if (!ctrl->cmd_busy)
108		return;
109
110	/*
111	 * Even if the command has already timed out, we want to call
112	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
113	 */
114	now = jiffies;
115	if (time_before_eq(cmd_timeout, now))
116		timeout = 1;
117	else
118		timeout = cmd_timeout - now;
119
120	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
121	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
122		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
123	else
124		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
125
126	if (!rc)
127		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
128			  ctrl->slot_ctrl,
129			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
130}
131
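   /*
    * Slot Control "Control" bits (as opposed to "Enable" bits): on
    * controllers with the Command Completed erratum described in
    * pcie_do_write_cmd() below, only changes to these bits cause the
    * Command Completed bit to be set.
    */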
132#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
133				 PCI_EXP_SLTCTL_PIC |	\
134				 PCI_EXP_SLTCTL_AIC |	\
135				 PCI_EXP_SLTCTL_EIC)
136
137static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
138			      u16 mask, bool wait)
139{
140	struct pci_dev *pdev = ctrl_dev(ctrl);
141	u16 slot_ctrl_orig, slot_ctrl;
142
143	mutex_lock(&ctrl->ctrl_lock);
144
145	/*
146	 * Always wait for any previous command that might still be in progress
147	 */
148	pcie_wait_cmd(ctrl);
149
150	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
151	if (slot_ctrl == (u16) ~0) {
152		ctrl_info(ctrl, "%s: no response from device\n", __func__);
153		goto out;
154	}
155
156	slot_ctrl_orig = slot_ctrl;
157	slot_ctrl &= ~mask;
158	slot_ctrl |= (cmd & mask);
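   	/*
   	 * Mark the command in flight before writing Slot Control; the
   	 * barrier pairs with the smp_mb() in pciehp_isr(), which clears
   	 * cmd_busy on Command Completed and wakes up pcie_wait_cmd().
   	 */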
159	ctrl->cmd_busy = 1;
160	smp_mb();
161	ctrl->slot_ctrl = slot_ctrl;
162	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
163	ctrl->cmd_started = jiffies;
164
165	/*
166	 * Controllers with the Intel CF118 and similar errata advertise
167	 * Command Completed support, but they only set Command Completed
168	 * if we change the "Control" bits for power, power indicator,
169	 * attention indicator, or interlock.  If we only change the
170	 * "Enable" bits, they never set the Command Completed bit.
171	 */
172	if (pdev->broken_cmd_compl &&
173	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
174		ctrl->cmd_busy = 0;
175
176	/*
177	 * Optionally wait for the hardware to be ready for a new command,
178	 * indicating completion of the above issued command.
179	 */
180	if (wait)
181		pcie_wait_cmd(ctrl);
182
183out:
184	mutex_unlock(&ctrl->ctrl_lock);
185}
186
187/**
188 * pcie_write_cmd - Issue controller command
189 * @ctrl: controller to which the command is issued
190 * @cmd:  command value written to slot control register
191 * @mask: bitmask of slot control register to be modified
192 */
193static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
194{
195	pcie_do_write_cmd(ctrl, cmd, mask, true);
196}
197
198/* Same as above without waiting for the hardware to latch */
199static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
200{
201	pcie_do_write_cmd(ctrl, cmd, mask, false);
202}
203
204bool pciehp_check_link_active(struct controller *ctrl)
205{
206	struct pci_dev *pdev = ctrl_dev(ctrl);
207	u16 lnk_status;
208	bool ret;
209
210	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
211	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
212
213	if (ret)
214		ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
215
216	return ret;
217}
218
219static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
220{
221	u32 l;
222	int count = 0;
223	int delay = 1000, step = 20;
224	bool found = false;
225
226	do {
227		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
228		count++;
229
230		if (found)
231			break;
232
233		msleep(step);
234		delay -= step;
235	} while (delay > 0);
236
237	if (count > 1)
238		pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
239			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
240			PCI_FUNC(devfn), count, step, l);
241
242	return found;
243}
244
245int pciehp_check_link_status(struct controller *ctrl)
246{
247	struct pci_dev *pdev = ctrl_dev(ctrl);
248	bool found;
249	u16 lnk_status;
250
251	if (!pcie_wait_for_link(pdev, true))
252		return -1;
253
254	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
255					PCI_DEVFN(0, 0));
256
257	/* ignore link or presence changes up to this point */
258	if (found)
259		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
260			   &ctrl->pending_events);
261
262	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
263	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
264	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
265	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
266		ctrl_err(ctrl, "link training error: status %#06x\n",
267			 lnk_status);
268		return -1;
269	}
270
271	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
272
273	if (!found)
274		return -1;
275
276	return 0;
277}
278
279static int __pciehp_link_set(struct controller *ctrl, bool enable)
280{
281	struct pci_dev *pdev = ctrl_dev(ctrl);
282	u16 lnk_ctrl;
283
284	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
285
286	if (enable)
287		lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
288	else
289		lnk_ctrl |= PCI_EXP_LNKCTL_LD;
290
291	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
292	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
293	return 0;
294}
295
296static int pciehp_link_enable(struct controller *ctrl)
297{
298	return __pciehp_link_set(ctrl, true);
299}
300
301int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
302				    u8 *status)
303{
304	struct controller *ctrl = to_ctrl(hotplug_slot);
305	struct pci_dev *pdev = ctrl_dev(ctrl);
306	u16 slot_ctrl;
307
308	pci_config_pm_runtime_get(pdev);
309	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
310	pci_config_pm_runtime_put(pdev);
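   	/*
   	 * AIC occupies bits 7:6 and PIC bits 9:8 of Slot Control, so the
   	 * shift by 6 packs both indicator fields into the low nibble.
   	 */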
311	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
312	return 0;
313}
314
315int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
316{
317	struct controller *ctrl = to_ctrl(hotplug_slot);
318	struct pci_dev *pdev = ctrl_dev(ctrl);
319	u16 slot_ctrl;
320
321	pci_config_pm_runtime_get(pdev);
322	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
323	pci_config_pm_runtime_put(pdev);
324	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
325		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
326
327	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
328	case PCI_EXP_SLTCTL_ATTN_IND_ON:
329		*status = 1;	/* On */
330		break;
331	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
332		*status = 2;	/* Blink */
333		break;
334	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
335		*status = 0;	/* Off */
336		break;
337	default:
338		*status = 0xFF;
339		break;
340	}
341
342	return 0;
343}
344
345void pciehp_get_power_status(struct controller *ctrl, u8 *status)
346{
347	struct pci_dev *pdev = ctrl_dev(ctrl);
348	u16 slot_ctrl;
349
350	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
351	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
352		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
353
354	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
355	case PCI_EXP_SLTCTL_PWR_ON:
356		*status = 1;	/* On */
357		break;
358	case PCI_EXP_SLTCTL_PWR_OFF:
359		*status = 0;	/* Off */
360		break;
361	default:
362		*status = 0xFF;
363		break;
364	}
365}
366
367void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
368{
369	struct pci_dev *pdev = ctrl_dev(ctrl);
370	u16 slot_status;
371
372	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
373	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
374}
375
376bool pciehp_card_present(struct controller *ctrl)
377{
378	struct pci_dev *pdev = ctrl_dev(ctrl);
379	u16 slot_status;
380
381	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
382	return slot_status & PCI_EXP_SLTSTA_PDS;
383}
384
385/**
386 * pciehp_card_present_or_link_active() - whether given slot is occupied
387 * @ctrl: PCIe hotplug controller
388 *
389 * Unlike pciehp_card_present(), which determines presence solely from the
390 * Presence Detect State bit, this helper also returns true if the Link Active
391 * bit is set.  This is a concession to broken hotplug ports which hardwire
392 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
393 */
394bool pciehp_card_present_or_link_active(struct controller *ctrl)
395{
396	return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
397}
398
399int pciehp_query_power_fault(struct controller *ctrl)
400{
401	struct pci_dev *pdev = ctrl_dev(ctrl);
402	u16 slot_status;
403
404	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
405	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
406}
407
408int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
409				    u8 status)
410{
411	struct controller *ctrl = to_ctrl(hotplug_slot);
412	struct pci_dev *pdev = ctrl_dev(ctrl);
413
414	pci_config_pm_runtime_get(pdev);
415	pcie_write_cmd_nowait(ctrl, status << 6,
416			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
417	pci_config_pm_runtime_put(pdev);
418	return 0;
419}
420
421/**
422 * pciehp_set_indicators() - set attention indicator, power indicator, or both
423 * @ctrl: PCIe hotplug controller
424 * @pwr: one of:
425 *	PCI_EXP_SLTCTL_PWR_IND_ON
426 *	PCI_EXP_SLTCTL_PWR_IND_BLINK
427 *	PCI_EXP_SLTCTL_PWR_IND_OFF
428 * @attn: one of:
429 *	PCI_EXP_SLTCTL_ATTN_IND_ON
430 *	PCI_EXP_SLTCTL_ATTN_IND_BLINK
431 *	PCI_EXP_SLTCTL_ATTN_IND_OFF
432 *
433 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
434 * unchanged.
435 */
436void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
437{
438	u16 cmd = 0, mask = 0;
439
440	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
441		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
442		mask |= PCI_EXP_SLTCTL_PIC;
443	}
444
445	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
446		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
447		mask |= PCI_EXP_SLTCTL_AIC;
448	}
449
450	if (cmd) {
451		pcie_write_cmd_nowait(ctrl, cmd, mask);
452		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
453			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
454	}
455}
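
   /*
    * Usage sketch: the power-fault path in pciehp_ist() below turns the
    * power indicator off and the attention indicator on.  A caller that
    * only wants to blink the power indicator would pass INDICATOR_NOOP
    * for @attn, e.g.:
    *
    *	pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
    *			      INDICATOR_NOOP);
    */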
456
457int pciehp_power_on_slot(struct controller *ctrl)
458{
459	struct pci_dev *pdev = ctrl_dev(ctrl);
460	u16 slot_status;
461	int retval;
462
463	/* Clear power-fault bit from previous power failures */
464	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
465	if (slot_status & PCI_EXP_SLTSTA_PFD)
466		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
467					   PCI_EXP_SLTSTA_PFD);
468	ctrl->power_fault_detected = 0;
469
470	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
471	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
472		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
473		 PCI_EXP_SLTCTL_PWR_ON);
474
475	retval = pciehp_link_enable(ctrl);
476	if (retval)
477		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
478
479	return retval;
480}
481
482void pciehp_power_off_slot(struct controller *ctrl)
483{
484	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
485	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
486		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
487		 PCI_EXP_SLTCTL_PWR_OFF);
488}
489
490static irqreturn_t pciehp_isr(int irq, void *dev_id)
491{
492	struct controller *ctrl = (struct controller *)dev_id;
493	struct pci_dev *pdev = ctrl_dev(ctrl);
494	struct device *parent = pdev->dev.parent;
495	u16 status, events;
496
497	/*
498	 * Interrupts only occur in D3hot or shallower and only if enabled
499	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
500	 */
501	if (pdev->current_state == PCI_D3cold ||
502	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
503		return IRQ_NONE;
504
505	/*
506	 * Keep the port accessible by holding a runtime PM ref on its parent.
507	 * Defer resume of the parent to the IRQ thread if it's suspended.
508	 * Mask the interrupt until then.
509	 */
510	if (parent) {
511		pm_runtime_get_noresume(parent);
512		if (!pm_runtime_active(parent)) {
513			pm_runtime_put(parent);
514			disable_irq_nosync(irq);
515			atomic_or(RERUN_ISR, &ctrl->pending_events);
516			return IRQ_WAKE_THREAD;
517		}
518	}
519
520	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
521	if (status == (u16) ~0) {
522		ctrl_info(ctrl, "%s: no response from device\n", __func__);
523		if (parent)
524			pm_runtime_put(parent);
525		return IRQ_NONE;
526	}
527
528	/*
529	 * Slot Status contains plain status bits as well as event
530	 * notification bits; right now we only want the event bits.
531	 */
532	events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
533			   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
534			   PCI_EXP_SLTSTA_DLLSC);
535
536	/*
537	 * If we've already reported a power fault, don't report it again
538	 * until we've done something to handle it.
539	 */
540	if (ctrl->power_fault_detected)
541		events &= ~PCI_EXP_SLTSTA_PFD;
542
543	if (!events) {
544		if (parent)
545			pm_runtime_put(parent);
546		return IRQ_NONE;
547	}
548
549	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
550	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
551	if (parent)
552		pm_runtime_put(parent);
553
554	/*
555	 * Command Completed notifications are not deferred to the
556	 * IRQ thread because it may be waiting for their arrival.
557	 */
558	if (events & PCI_EXP_SLTSTA_CC) {
559		ctrl->cmd_busy = 0;
560		smp_mb();
561		wake_up(&ctrl->queue);
562
563		if (events == PCI_EXP_SLTSTA_CC)
564			return IRQ_HANDLED;
565
566		events &= ~PCI_EXP_SLTSTA_CC;
567	}
568
569	if (pdev->ignore_hotplug) {
570		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
571		return IRQ_HANDLED;
572	}
573
574	/* Save pending events for consumption by IRQ thread. */
575	atomic_or(events, &ctrl->pending_events);
576	return IRQ_WAKE_THREAD;
577}
578
579static irqreturn_t pciehp_ist(int irq, void *dev_id)
580{
581	struct controller *ctrl = (struct controller *)dev_id;
582	struct pci_dev *pdev = ctrl_dev(ctrl);
583	irqreturn_t ret;
584	u32 events;
585
586	pci_config_pm_runtime_get(pdev);
587
588	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
589	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
590		ret = pciehp_isr(irq, dev_id);
591		enable_irq(irq);
592		if (ret != IRQ_WAKE_THREAD) {
593			pci_config_pm_runtime_put(pdev);
594			return ret;
595		}
596	}
597
598	synchronize_hardirq(irq);
599	events = atomic_xchg(&ctrl->pending_events, 0);
600	if (!events) {
601		pci_config_pm_runtime_put(pdev);
602		return IRQ_NONE;
603	}
604
605	/* Check Attention Button Pressed */
606	if (events & PCI_EXP_SLTSTA_ABP) {
607		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
608			  slot_name(ctrl));
609		pciehp_handle_button_press(ctrl);
610	}
611
612	/* Check Power Fault Detected */
613	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
614		ctrl->power_fault_detected = 1;
615		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
616		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
617				      PCI_EXP_SLTCTL_ATTN_IND_ON);
618	}
619
620	/*
621	 * Disable requests have higher priority than Presence Detect Changed
622	 * or Data Link Layer State Changed events.
623	 */
624	down_read(&ctrl->reset_lock);
625	if (events & DISABLE_SLOT)
626		pciehp_handle_disable_request(ctrl);
627	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
628		pciehp_handle_presence_or_link_change(ctrl, events);
629	up_read(&ctrl->reset_lock);
630
631	pci_config_pm_runtime_put(pdev);
632	wake_up(&ctrl->requester);
633	return IRQ_HANDLED;
634}
635
636static int pciehp_poll(void *data)
637{
638	struct controller *ctrl = data;
639
640	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
641
642	while (!kthread_should_stop()) {
643		/* poll for interrupt events or user requests */
644		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
645		       atomic_read(&ctrl->pending_events))
646			pciehp_ist(IRQ_NOTCONNECTED, ctrl);
647
648		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
649			pciehp_poll_time = 2; /* clamp to sane value */
650
651		schedule_timeout_idle(pciehp_poll_time * HZ);
652	}
653
654	return 0;
655}
656
657static void pcie_enable_notification(struct controller *ctrl)
658{
659	u16 cmd, mask;
660
661	/*
662	 * TBD: Power fault detected software notification support.
663	 *
664	 * Power fault detected software notification is not enabled
665	 * now, because it caused power fault detected interrupt storm
666	 * on some machines. On those machines, power fault detected
667	 * bit in the slot status register was set again immediately
668	 * after it was cleared in the interrupt service routine, and the
669	 * next power fault detected interrupt was generated again.
670	 */
671
672	/*
673	 * Always enable link events: thus link-up and link-down shall
674	 * always be treated as hotplug and unplug respectively. Enable
675	 * presence detect only if Attention Button is not present.
676	 */
677	cmd = PCI_EXP_SLTCTL_DLLSCE;
678	if (ATTN_BUTTN(ctrl))
679		cmd |= PCI_EXP_SLTCTL_ABPE;
680	else
681		cmd |= PCI_EXP_SLTCTL_PDCE;
682	if (!pciehp_poll_mode)
683		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
684
685	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
686		PCI_EXP_SLTCTL_PFDE |
687		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
688		PCI_EXP_SLTCTL_DLLSCE);
689
690	pcie_write_cmd_nowait(ctrl, cmd, mask);
691	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
692		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
693}
694
695static void pcie_disable_notification(struct controller *ctrl)
696{
697	u16 mask;
698
699	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
700		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
701		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
702		PCI_EXP_SLTCTL_DLLSCE);
703	pcie_write_cmd(ctrl, 0, mask);
704	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
705		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
706}
707
708void pcie_clear_hotplug_events(struct controller *ctrl)
709{
710	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
711				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
712}
713
714void pcie_enable_interrupt(struct controller *ctrl)
715{
716	u16 mask;
717
718	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
719	pcie_write_cmd(ctrl, mask, mask);
720}
721
722void pcie_disable_interrupt(struct controller *ctrl)
723{
724	u16 mask;
725
726	/*
727	 * Mask hot-plug interrupt to prevent it triggering immediately
728	 * when the link goes inactive (we still get PME when any of the
729	 * enabled events is detected). Same goes with Link Layer State
730	 * changed event which generates PME immediately when the link goes
731	 * inactive so mask it as well.
732	 */
733	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
734	pcie_write_cmd(ctrl, 0, mask);
735}
736
737/*
738 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
739 * bus reset of the bridge, but at the same time we want to ensure that it is
740 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
741 * disable link state notification and presence detection change notification
742 * momentarily, if we see that they could interfere. Also, clear any spurious
743 * events after.
744 */
745int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
746{
747	struct controller *ctrl = to_ctrl(hotplug_slot);
748	struct pci_dev *pdev = ctrl_dev(ctrl);
749	u16 stat_mask = 0, ctrl_mask = 0;
750	int rc;
751
752	if (probe)
753		return 0;
754
755	down_write(&ctrl->reset_lock);
756
757	if (!ATTN_BUTTN(ctrl)) {
758		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
759		stat_mask |= PCI_EXP_SLTSTA_PDC;
760	}
761	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
762	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
763
764	pcie_write_cmd(ctrl, 0, ctrl_mask);
765	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
766		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
767
768	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
769
770	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
771	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
772	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
773		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
774
775	up_write(&ctrl->reset_lock);
776	return rc;
777}
778
779int pcie_init_notification(struct controller *ctrl)
780{
781	if (pciehp_request_irq(ctrl))
782		return -1;
783	pcie_enable_notification(ctrl);
784	ctrl->notification_enabled = 1;
785	return 0;
786}
787
788void pcie_shutdown_notification(struct controller *ctrl)
789{
790	if (ctrl->notification_enabled) {
791		pcie_disable_notification(ctrl);
792		pciehp_free_irq(ctrl);
793		ctrl->notification_enabled = 0;
794	}
795}
796
797static inline void dbg_ctrl(struct controller *ctrl)
798{
799	struct pci_dev *pdev = ctrl->pcie->port;
800	u16 reg16;
801
802	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
803	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
804	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
805	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
806	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
807}
808
809#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
810
811struct controller *pcie_init(struct pcie_device *dev)
812{
813	struct controller *ctrl;
814	u32 slot_cap, link_cap;
815	u8 poweron;
816	struct pci_dev *pdev = dev->port;
817	struct pci_bus *subordinate = pdev->subordinate;
818
819	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
820	if (!ctrl)
821		return NULL;
822
823	ctrl->pcie = dev;
824	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
825
826	if (pdev->hotplug_user_indicators)
827		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
828
829	/*
830	 * We assume no Thunderbolt controllers support Command Complete events,
831	 * but some controllers falsely claim they do.
832	 */
833	if (pdev->is_thunderbolt)
834		slot_cap |= PCI_EXP_SLTCAP_NCCS;
835
836	ctrl->slot_cap = slot_cap;
837	mutex_init(&ctrl->ctrl_lock);
838	mutex_init(&ctrl->state_lock);
839	init_rwsem(&ctrl->reset_lock);
840	init_waitqueue_head(&ctrl->requester);
841	init_waitqueue_head(&ctrl->queue);
842	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
843	dbg_ctrl(ctrl);
844
845	down_read(&pci_bus_sem);
846	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
847	up_read(&pci_bus_sem);
848
849	/* Check if Data Link Layer Link Active Reporting is implemented */
850	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
851
852	/* Clear all remaining event bits in Slot Status register. */
853	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
854		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
855		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
856		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
857
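   	/*
   	 * Slot capability summary: PCI_EXP_SLTCAP_PSN is the Physical Slot
   	 * Number field in bits 31:19 of Slot Capabilities, and FLAG() prints
   	 * '+' or '-' depending on whether each capability bit is set.
   	 */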
858	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
859		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
860		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
861		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
862		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
863		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
864		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
865		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
866		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
867		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
868		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
869		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
870		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
871
872	/*
873	 * If empty slot's power status is on, turn power off.  The IRQ isn't
874	 * requested yet, so avoid triggering a notification with this command.
875	 */
876	if (POWER_CTRL(ctrl)) {
877		pciehp_get_power_status(ctrl, &poweron);
878		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
879			pcie_disable_notification(ctrl);
880			pciehp_power_off_slot(ctrl);
881		}
882	}
883
884	return ctrl;
885}
886
887void pciehp_release_ctrl(struct controller *ctrl)
888{
889	cancel_delayed_work_sync(&ctrl->button_work);
890	kfree(ctrl);
891}
892
893static void quirk_cmd_compl(struct pci_dev *pdev)
894{
895	u32 slot_cap;
896
897	if (pci_is_pcie(pdev)) {
898		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
899		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
900		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
901			pdev->broken_cmd_compl = 1;
902	}
903}
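   /*
    * Apply quirk_cmd_compl() early during enumeration to the bridges listed
    * below; a class_shift of 8 matches on base class and subclass
    * (PCI_CLASS_BRIDGE_PCI), ignoring the programming interface byte.
    */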
904DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
905			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
906DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
907			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
908DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
909			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
910DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
911			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCI Express PCI Hot Plug Driver
   4 *
   5 * Copyright (C) 1995,2001 Compaq Computer Corporation
   6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
   7 * Copyright (C) 2001 IBM Corp.
   8 * Copyright (C) 2003-2004 Intel Corporation
   9 *
  10 * All rights reserved.
  11 *
  12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
  13 */
  14
  15#define dev_fmt(fmt) "pciehp: " fmt
  16
  17#include <linux/bitfield.h>
  18#include <linux/dmi.h>
  19#include <linux/kernel.h>
  20#include <linux/types.h>
  21#include <linux/jiffies.h>
  22#include <linux/kthread.h>
  23#include <linux/pci.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/interrupt.h>
  26#include <linux/slab.h>
  27
  28#include "../pci.h"
  29#include "pciehp.h"
  30
  31static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
  32	/*
  33	 * Match all Dell systems, as some Dell systems have inband
  34	 * presence disabled on NVMe slots (but don't support the bit to
  35	 * report it). Setting inband presence disabled should have no
  36	 * negative effect, except on broken hotplug slots that never
  37	 * assert presence detect--and those will still work, they will
  38	 * just have a bit of extra delay before being probed.
  39	 */
  40	{
  41		.ident = "Dell System",
  42		.matches = {
  43			DMI_MATCH(DMI_OEM_STRING, "Dell System"),
  44		},
  45	},
  46	{}
  47};
  48
  49static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
  50{
  51	return ctrl->pcie->port;
  52}
  53
  54static irqreturn_t pciehp_isr(int irq, void *dev_id);
  55static irqreturn_t pciehp_ist(int irq, void *dev_id);
  56static int pciehp_poll(void *data);
  57
  58static inline int pciehp_request_irq(struct controller *ctrl)
  59{
  60	int retval, irq = ctrl->pcie->irq;
  61
  62	if (pciehp_poll_mode) {
  63		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
  64						"pciehp_poll-%s",
  65						slot_name(ctrl));
  66		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
  67	}
  68
  69	/* Installs the interrupt handler */
  70	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
  71				      IRQF_SHARED, "pciehp", ctrl);
  72	if (retval)
  73		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
  74			 irq);
  75	return retval;
  76}
  77
  78static inline void pciehp_free_irq(struct controller *ctrl)
  79{
  80	if (pciehp_poll_mode)
  81		kthread_stop(ctrl->poll_thread);
  82	else
  83		free_irq(ctrl->pcie->irq, ctrl);
  84}
  85
  86static int pcie_poll_cmd(struct controller *ctrl, int timeout)
  87{
  88	struct pci_dev *pdev = ctrl_dev(ctrl);
  89	u16 slot_status;
  90
  91	do {
  92		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  93		if (PCI_POSSIBLE_ERROR(slot_status)) {
  94			ctrl_info(ctrl, "%s: no response from device\n",
  95				  __func__);
  96			return 0;
  97		}
  98
  99		if (slot_status & PCI_EXP_SLTSTA_CC) {
 100			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 101						   PCI_EXP_SLTSTA_CC);
 102			ctrl->cmd_busy = 0;
 103			smp_mb();
 104			return 1;
 105		}
 106		msleep(10);
 107		timeout -= 10;
 108	} while (timeout >= 0);
 109	return 0;	/* timeout */
 110}
 111
 112static void pcie_wait_cmd(struct controller *ctrl)
 113{
 114	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 115	unsigned long duration = msecs_to_jiffies(msecs);
 116	unsigned long cmd_timeout = ctrl->cmd_started + duration;
 117	unsigned long now, timeout;
 118	int rc;
 119
 120	/*
 121	 * If the controller does not generate notifications for command
 122	 * completions, we never need to wait between writes.
 123	 */
 124	if (NO_CMD_CMPL(ctrl))
 125		return;
 126
 127	if (!ctrl->cmd_busy)
 128		return;
 129
 130	/*
 131	 * Even if the command has already timed out, we want to call
 132	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
 133	 */
 134	now = jiffies;
 135	if (time_before_eq(cmd_timeout, now))
 136		timeout = 1;
 137	else
 138		timeout = cmd_timeout - now;
 139
 140	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
 141	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
 142		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
 143	else
 144		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 145
 146	if (!rc)
 147		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
 148			  ctrl->slot_ctrl,
 149			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
 150}
 151
 152#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
 153				 PCI_EXP_SLTCTL_PIC |	\
 154				 PCI_EXP_SLTCTL_AIC |	\
 155				 PCI_EXP_SLTCTL_EIC)
 156
 157static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 158			      u16 mask, bool wait)
 159{
 160	struct pci_dev *pdev = ctrl_dev(ctrl);
 161	u16 slot_ctrl_orig, slot_ctrl;
 162
 163	mutex_lock(&ctrl->ctrl_lock);
 164
 165	/*
 166	 * Always wait for any previous command that might still be in progress
 167	 */
 168	pcie_wait_cmd(ctrl);
 169
 170	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 171	if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
 172		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 173		goto out;
 174	}
 175
 176	slot_ctrl_orig = slot_ctrl;
 177	slot_ctrl &= ~mask;
 178	slot_ctrl |= (cmd & mask);
 179	ctrl->cmd_busy = 1;
 180	smp_mb();
 181	ctrl->slot_ctrl = slot_ctrl;
 182	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
 183	ctrl->cmd_started = jiffies;
 184
 185	/*
 186	 * Controllers with the Intel CF118 and similar errata advertise
 187	 * Command Completed support, but they only set Command Completed
 188	 * if we change the "Control" bits for power, power indicator,
 189	 * attention indicator, or interlock.  If we only change the
 190	 * "Enable" bits, they never set the Command Completed bit.
 191	 */
 192	if (pdev->broken_cmd_compl &&
 193	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
 194		ctrl->cmd_busy = 0;
 195
 196	/*
 197	 * Optionally wait for the hardware to be ready for a new command,
 198	 * indicating completion of the above issued command.
 199	 */
 200	if (wait)
 201		pcie_wait_cmd(ctrl);
 202
 203out:
 204	mutex_unlock(&ctrl->ctrl_lock);
 205}
 206
 207/**
 208 * pcie_write_cmd - Issue controller command
 209 * @ctrl: controller to which the command is issued
 210 * @cmd:  command value written to slot control register
 211 * @mask: bitmask of slot control register to be modified
 212 */
 213static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 214{
 215	pcie_do_write_cmd(ctrl, cmd, mask, true);
 216}
 217
 218/* Same as above without waiting for the hardware to latch */
 219static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
 220{
 221	pcie_do_write_cmd(ctrl, cmd, mask, false);
 222}
 223
 224/**
 225 * pciehp_check_link_active() - Is the link active
 226 * @ctrl: PCIe hotplug controller
 227 *
 228 * Check whether the downstream link is currently active. Note it is
 229 * possible that the card is removed immediately after this so the
 230 * caller may need to take it into account.
 231 *
 232 * If the hotplug controller itself is not available anymore returns
 233 * %-ENODEV.
 234 */
 235int pciehp_check_link_active(struct controller *ctrl)
 236{
 237	struct pci_dev *pdev = ctrl_dev(ctrl);
 238	u16 lnk_status;
 239	int ret;
 240
 241	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 242	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
 243		return -ENODEV;
 244
 245	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
 246	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 247
 248	return ret;
 249}
 250
 251static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
 252{
 253	u32 l;
 254	int count = 0;
 255	int delay = 1000, step = 20;
 256	bool found = false;
 257
 258	do {
 259		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
 260		count++;
 261
 262		if (found)
 263			break;
 264
 265		msleep(step);
 266		delay -= step;
 267	} while (delay > 0);
 268
 269	if (count > 1)
 270		pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
 271			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
 272			PCI_FUNC(devfn), count, step, l);
 273
 274	return found;
 275}
 276
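    /*
     * When in-band presence detect is disabled, Presence Detect State may
     * not be set yet by the time the link comes up, so poll it for a while
     * before probing the slot (pciehp_check_link_status() below calls this
     * after waiting for the link).
     */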
 277static void pcie_wait_for_presence(struct pci_dev *pdev)
 278{
 279	int timeout = 1250;
 280	u16 slot_status;
 281
 282	do {
 283		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 284		if (slot_status & PCI_EXP_SLTSTA_PDS)
 285			return;
 286		msleep(10);
 287		timeout -= 10;
 288	} while (timeout > 0);
 289}
 290
 291int pciehp_check_link_status(struct controller *ctrl)
 292{
 293	struct pci_dev *pdev = ctrl_dev(ctrl);
 294	bool found;
 295	u16 lnk_status;
 296
 297	if (!pcie_wait_for_link(pdev, true)) {
 298		ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
 299		return -1;
 300	}
 301
 302	if (ctrl->inband_presence_disabled)
 303		pcie_wait_for_presence(pdev);
 304
 305	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
 306					PCI_DEVFN(0, 0));
 307
 308	/* ignore link or presence changes up to this point */
 309	if (found)
 310		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
 311			   &ctrl->pending_events);
 312
 313	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 314	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 315	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
 316	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
 317		ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
 318			  slot_name(ctrl), lnk_status);
 319		return -1;
 320	}
 321
 322	__pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
 323
 324	if (!found) {
 325		ctrl_info(ctrl, "Slot(%s): No device found\n",
 326			  slot_name(ctrl));
 327		return -1;
 328	}
 329
 330	return 0;
 331}
 332
 333static int __pciehp_link_set(struct controller *ctrl, bool enable)
 334{
 335	struct pci_dev *pdev = ctrl_dev(ctrl);
 336
 337	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 338					   PCI_EXP_LNKCTL_LD,
 339					   enable ? 0 : PCI_EXP_LNKCTL_LD);
 340
 341	return 0;
 342}
 343
 344static int pciehp_link_enable(struct controller *ctrl)
 345{
 346	return __pciehp_link_set(ctrl, true);
 347}
 348
 349int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 350				    u8 *status)
 351{
 352	struct controller *ctrl = to_ctrl(hotplug_slot);
 353	struct pci_dev *pdev = ctrl_dev(ctrl);
 354	u16 slot_ctrl;
 355
 356	pci_config_pm_runtime_get(pdev);
 357	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 358	pci_config_pm_runtime_put(pdev);
 359	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
 360	return 0;
 361}
 362
 363int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
 364{
 365	struct controller *ctrl = to_ctrl(hotplug_slot);
 366	struct pci_dev *pdev = ctrl_dev(ctrl);
 367	u16 slot_ctrl;
 368
 369	pci_config_pm_runtime_get(pdev);
 370	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 371	pci_config_pm_runtime_put(pdev);
 372	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
 373		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 374
 375	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
 376	case PCI_EXP_SLTCTL_ATTN_IND_ON:
 377		*status = 1;	/* On */
 378		break;
 379	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
 380		*status = 2;	/* Blink */
 381		break;
 382	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
 383		*status = 0;	/* Off */
 384		break;
 385	default:
 386		*status = 0xFF;
 387		break;
 388	}
 389
 390	return 0;
 391}
 392
 393void pciehp_get_power_status(struct controller *ctrl, u8 *status)
 394{
 395	struct pci_dev *pdev = ctrl_dev(ctrl);
 396	u16 slot_ctrl;
 397
 398	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 399	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
 400		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 401
 402	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
 403	case PCI_EXP_SLTCTL_PWR_ON:
 404		*status = 1;	/* On */
 405		break;
 406	case PCI_EXP_SLTCTL_PWR_OFF:
 407		*status = 0;	/* Off */
 408		break;
 409	default:
 410		*status = 0xFF;
 411		break;
 412	}
 413}
 414
 415void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
 416{
 417	struct pci_dev *pdev = ctrl_dev(ctrl);
 418	u16 slot_status;
 419
 420	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 421	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 422}
 423
 424/**
 425 * pciehp_card_present() - Is the card present
 426 * @ctrl: PCIe hotplug controller
 427 *
 428 * Function checks whether the card is currently present in the slot and
 429 * in that case returns true. Note it is possible that the card is
 430 * removed immediately after the check so the caller may need to take
 431 * this into account.
 432 *
 433 * If the hotplug controller itself is not available anymore returns
 434 * %-ENODEV.
 435 */
 436int pciehp_card_present(struct controller *ctrl)
 437{
 438	struct pci_dev *pdev = ctrl_dev(ctrl);
 439	u16 slot_status;
 440	int ret;
 441
 442	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 443	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
 444		return -ENODEV;
 445
 446	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
 447}
 448
 449/**
 450 * pciehp_card_present_or_link_active() - whether given slot is occupied
 451 * @ctrl: PCIe hotplug controller
 452 *
 453 * Unlike pciehp_card_present(), which determines presence solely from the
 454 * Presence Detect State bit, this helper also returns true if the Link Active
 455 * bit is set.  This is a concession to broken hotplug ports which hardwire
 456 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
 457 *
 458 * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
 459 *	    port is not present anymore returns %-ENODEV.
 460 */
 461int pciehp_card_present_or_link_active(struct controller *ctrl)
 462{
 463	int ret;
 464
 465	ret = pciehp_card_present(ctrl);
 466	if (ret)
 467		return ret;
 468
 469	return pciehp_check_link_active(ctrl);
 470}
 471
 472int pciehp_query_power_fault(struct controller *ctrl)
 473{
 474	struct pci_dev *pdev = ctrl_dev(ctrl);
 475	u16 slot_status;
 476
 477	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 478	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
 479}
 480
 481int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
 482				    u8 status)
 483{
 484	struct controller *ctrl = to_ctrl(hotplug_slot);
 485	struct pci_dev *pdev = ctrl_dev(ctrl);
 486
 487	pci_config_pm_runtime_get(pdev);
 488
 489	/* Attention and Power Indicator Control bits are supported */
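    	/*
    	 * FIELD_PREP() shifts @status into the AIC/PIC field (bits 9:6 of
    	 * Slot Control); the v5.4 listing above open-codes this as
    	 * "status << 6".
    	 */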
 490	pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
 491			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
 492	pci_config_pm_runtime_put(pdev);
 493	return 0;
 494}
 495
 496/**
 497 * pciehp_set_indicators() - set attention indicator, power indicator, or both
 498 * @ctrl: PCIe hotplug controller
 499 * @pwr: one of:
 500 *	PCI_EXP_SLTCTL_PWR_IND_ON
 501 *	PCI_EXP_SLTCTL_PWR_IND_BLINK
 502 *	PCI_EXP_SLTCTL_PWR_IND_OFF
 503 * @attn: one of:
 504 *	PCI_EXP_SLTCTL_ATTN_IND_ON
 505 *	PCI_EXP_SLTCTL_ATTN_IND_BLINK
 506 *	PCI_EXP_SLTCTL_ATTN_IND_OFF
 507 *
 508 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
 509 * unchanged.
 510 */
 511void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
 512{
 513	u16 cmd = 0, mask = 0;
 514
 515	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
 516		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
 517		mask |= PCI_EXP_SLTCTL_PIC;
 518	}
 519
 520	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
 521		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
 522		mask |= PCI_EXP_SLTCTL_AIC;
 523	}
 524
 525	if (cmd) {
 526		pcie_write_cmd_nowait(ctrl, cmd, mask);
 527		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 528			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 529	}
 530}
 531
 532int pciehp_power_on_slot(struct controller *ctrl)
 533{
 534	struct pci_dev *pdev = ctrl_dev(ctrl);
 535	u16 slot_status;
 536	int retval;
 537
 538	/* Clear power-fault bit from previous power failures */
 539	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 540	if (slot_status & PCI_EXP_SLTSTA_PFD)
 541		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 542					   PCI_EXP_SLTSTA_PFD);
 543	ctrl->power_fault_detected = 0;
 544
 545	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
 546	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 547		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 548		 PCI_EXP_SLTCTL_PWR_ON);
 549
 550	retval = pciehp_link_enable(ctrl);
 551	if (retval)
 552		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
 553
 554	return retval;
 555}
 556
 557void pciehp_power_off_slot(struct controller *ctrl)
 558{
 559	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
 560	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 561		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
 562		 PCI_EXP_SLTCTL_PWR_OFF);
 563}
 564
 565static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
 566					  struct pci_dev *pdev, int irq)
 567{
 568	/*
 569	 * Ignore link changes which occurred while waiting for DPC recovery.
 570	 * Could be several if DPC triggered multiple times consecutively.
 571	 */
 572	synchronize_hardirq(irq);
 573	atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
 574	if (pciehp_poll_mode)
 575		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 576					   PCI_EXP_SLTSTA_DLLSC);
 577	ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
 578		  slot_name(ctrl));
 579
 580	/*
 581	 * If the link is unexpectedly down after successful recovery,
 582	 * the corresponding link change may have been ignored above.
 583	 * Synthesize it to ensure that it is acted on.
 584	 */
 585	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 586	if (!pciehp_check_link_active(ctrl))
 587		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 588	up_read(&ctrl->reset_lock);
 589}
 590
 591static irqreturn_t pciehp_isr(int irq, void *dev_id)
 592{
 593	struct controller *ctrl = (struct controller *)dev_id;
 594	struct pci_dev *pdev = ctrl_dev(ctrl);
 595	struct device *parent = pdev->dev.parent;
 596	u16 status, events = 0;
 597
 598	/*
 599	 * Interrupts only occur in D3hot or shallower and only if enabled
 600	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
 601	 */
 602	if (pdev->current_state == PCI_D3cold ||
 603	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
 604		return IRQ_NONE;
 605
 606	/*
 607	 * Keep the port accessible by holding a runtime PM ref on its parent.
 608	 * Defer resume of the parent to the IRQ thread if it's suspended.
 609	 * Mask the interrupt until then.
 610	 */
 611	if (parent) {
 612		pm_runtime_get_noresume(parent);
 613		if (!pm_runtime_active(parent)) {
 614			pm_runtime_put(parent);
 615			disable_irq_nosync(irq);
 616			atomic_or(RERUN_ISR, &ctrl->pending_events);
 617			return IRQ_WAKE_THREAD;
 618		}
 619	}
 620
 621read_status:
 622	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
 623	if (PCI_POSSIBLE_ERROR(status)) {
 624		ctrl_info(ctrl, "%s: no response from device\n", __func__);
 625		if (parent)
 626			pm_runtime_put(parent);
 627		return IRQ_NONE;
 628	}
 629
 630	/*
 631	 * Slot Status contains plain status bits as well as event
 632	 * notification bits; right now we only want the event bits.
 633	 */
 634	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 635		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
 636		  PCI_EXP_SLTSTA_DLLSC;
 637
 638	/*
 639	 * If we've already reported a power fault, don't report it again
 640	 * until we've done something to handle it.
 641	 */
 642	if (ctrl->power_fault_detected)
 643		status &= ~PCI_EXP_SLTSTA_PFD;
 644	else if (status & PCI_EXP_SLTSTA_PFD)
 645		ctrl->power_fault_detected = true;
 646
 647	events |= status;
 648	if (!events) {
 649		if (parent)
 650			pm_runtime_put(parent);
 651		return IRQ_NONE;
 652	}
 653
 654	if (status) {
 655		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
 656
 657		/*
 658		 * In MSI mode, all event bits must be zero before the port
 659		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
 660		 * So re-read the Slot Status register in case a bit was set
 661		 * between read and write.
 662		 */
 663		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
 664			goto read_status;
 665	}
 666
 667	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
 668	if (parent)
 669		pm_runtime_put(parent);
 670
 671	/*
 672	 * Command Completed notifications are not deferred to the
 673	 * IRQ thread because it may be waiting for their arrival.
 674	 */
 675	if (events & PCI_EXP_SLTSTA_CC) {
 676		ctrl->cmd_busy = 0;
 677		smp_mb();
 678		wake_up(&ctrl->queue);
 679
 680		if (events == PCI_EXP_SLTSTA_CC)
 681			return IRQ_HANDLED;
 682
 683		events &= ~PCI_EXP_SLTSTA_CC;
 684	}
 685
 686	if (pdev->ignore_hotplug) {
 687		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
 688		return IRQ_HANDLED;
 689	}
 690
 691	/* Save pending events for consumption by IRQ thread. */
 692	atomic_or(events, &ctrl->pending_events);
 693	return IRQ_WAKE_THREAD;
 694}
 695
 696static irqreturn_t pciehp_ist(int irq, void *dev_id)
 697{
 698	struct controller *ctrl = (struct controller *)dev_id;
 699	struct pci_dev *pdev = ctrl_dev(ctrl);
 700	irqreturn_t ret;
 701	u32 events;
 702
 703	ctrl->ist_running = true;
 704	pci_config_pm_runtime_get(pdev);
 705
 706	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
 707	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
 708		ret = pciehp_isr(irq, dev_id);
 709		enable_irq(irq);
 710		if (ret != IRQ_WAKE_THREAD)
 711			goto out;
 712	}
 713
 714	synchronize_hardirq(irq);
 715	events = atomic_xchg(&ctrl->pending_events, 0);
 716	if (!events) {
 717		ret = IRQ_NONE;
 718		goto out;
 719	}
 720
 721	/* Check Attention Button Pressed */
 722	if (events & PCI_EXP_SLTSTA_ABP)
 723		pciehp_handle_button_press(ctrl);
 724
 725	/* Check Power Fault Detected */
 726	if (events & PCI_EXP_SLTSTA_PFD) {
 727		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
 728		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
 729				      PCI_EXP_SLTCTL_ATTN_IND_ON);
 730	}
 731
 732	/*
 733	 * Ignore Link Down/Up events caused by Downstream Port Containment
 734	 * if recovery from the error succeeded.
 735	 */
 736	if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
 737	    ctrl->state == ON_STATE) {
 738		events &= ~PCI_EXP_SLTSTA_DLLSC;
 739		pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
 740	}
 741
 742	/*
 743	 * Disable requests have higher priority than Presence Detect Changed
 744	 * or Data Link Layer State Changed events.
 745	 */
 746	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 747	if (events & DISABLE_SLOT)
 748		pciehp_handle_disable_request(ctrl);
 749	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
 750		pciehp_handle_presence_or_link_change(ctrl, events);
 751	up_read(&ctrl->reset_lock);
 752
 753	ret = IRQ_HANDLED;
 754out:
 755	pci_config_pm_runtime_put(pdev);
 756	ctrl->ist_running = false;
 757	wake_up(&ctrl->requester);
 758	return ret;
 759}
 760
 761static int pciehp_poll(void *data)
 762{
 763	struct controller *ctrl = data;
 764
 765	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
 766
 767	while (!kthread_should_stop()) {
 768		/* poll for interrupt events or user requests */
 769		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
 770		       atomic_read(&ctrl->pending_events))
 771			pciehp_ist(IRQ_NOTCONNECTED, ctrl);
 772
 773		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
 774			pciehp_poll_time = 2; /* clamp to sane value */
 775
 776		schedule_timeout_idle(pciehp_poll_time * HZ);
 777	}
 778
 779	return 0;
 780}
 781
 782static void pcie_enable_notification(struct controller *ctrl)
 783{
 784	u16 cmd, mask;
 785
 786	/*
 787	 * TBD: Power fault detected software notification support.
 788	 *
 789	 * Power fault detected software notification is not enabled
 790	 * now, because it caused power fault detected interrupt storm
 791	 * on some machines. On those machines, power fault detected
 792	 * bit in the slot status register was set again immediately
 793	 * after it was cleared in the interrupt service routine, and the
 794	 * next power fault detected interrupt was generated again.
 795	 */
 796
 797	/*
 798	 * Always enable link events: thus link-up and link-down shall
 799	 * always be treated as hotplug and unplug respectively. Enable
 800	 * presence detect only if Attention Button is not present.
 801	 */
 802	cmd = PCI_EXP_SLTCTL_DLLSCE;
 803	if (ATTN_BUTTN(ctrl))
 804		cmd |= PCI_EXP_SLTCTL_ABPE;
 805	else
 806		cmd |= PCI_EXP_SLTCTL_PDCE;
 807	if (!pciehp_poll_mode)
 808		cmd |= PCI_EXP_SLTCTL_HPIE;
 809	if (!pciehp_poll_mode && !NO_CMD_CMPL(ctrl))
 810		cmd |= PCI_EXP_SLTCTL_CCIE;
 811
 812	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 813		PCI_EXP_SLTCTL_PFDE |
 814		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 815		PCI_EXP_SLTCTL_DLLSCE);
 816
 817	pcie_write_cmd_nowait(ctrl, cmd, mask);
 818	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 819		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 820}
 821
 822static void pcie_disable_notification(struct controller *ctrl)
 823{
 824	u16 mask;
 825
 826	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 827		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
 828		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 829		PCI_EXP_SLTCTL_DLLSCE);
 830	pcie_write_cmd(ctrl, 0, mask);
 831	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 832		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 833}
 834
 835void pcie_clear_hotplug_events(struct controller *ctrl)
 836{
 837	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
 838				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
 839}
 840
 841void pcie_enable_interrupt(struct controller *ctrl)
 842{
 843	u16 mask;
 844
 845	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 846	pcie_write_cmd(ctrl, mask, mask);
 847}
 848
 849void pcie_disable_interrupt(struct controller *ctrl)
 850{
 851	u16 mask;
 852
 853	/*
 854	 * Mask hot-plug interrupt to prevent it triggering immediately
 855	 * when the link goes inactive (we still get PME when any of the
 856	 * enabled events is detected). Same goes with Link Layer State
 857	 * changed event which generates PME immediately when the link goes
 858	 * inactive so mask it as well.
 859	 */
 860	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
 861	pcie_write_cmd(ctrl, 0, mask);
 862}
 863
 864/**
 865 * pciehp_slot_reset() - ignore link event caused by error-induced hot reset
 866 * @dev: PCI Express port service device
 867 *
 868 * Called from pcie_portdrv_slot_reset() after AER or DPC initiated a reset
 869 * further up in the hierarchy to recover from an error.  The reset was
 870 * propagated down to this hotplug port.  Ignore the resulting link flap.
 871 * If the link failed to retrain successfully, synthesize the ignored event.
 872 * Surprise removal during reset is detected through Presence Detect Changed.
 873 */
 874int pciehp_slot_reset(struct pcie_device *dev)
 875{
 876	struct controller *ctrl = get_service_data(dev);
 877
 878	if (ctrl->state != ON_STATE)
 879		return 0;
 880
 881	pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA,
 882				   PCI_EXP_SLTSTA_DLLSC);
 883
 884	if (!pciehp_check_link_active(ctrl))
 885		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
 886
 887	return 0;
 888}
 889
 890/*
 891 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 892 * bus reset of the bridge, but at the same time we want to ensure that it is
 893 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 894 * disable link state notification and presence detection change notification
 895 * momentarily, if we see that they could interfere. Also, clear any spurious
 896 * events after.
 897 */
 898int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
 899{
 900	struct controller *ctrl = to_ctrl(hotplug_slot);
 901	struct pci_dev *pdev = ctrl_dev(ctrl);
 902	u16 stat_mask = 0, ctrl_mask = 0;
 903	int rc;
 904
 905	if (probe)
 906		return 0;
 907
 908	down_write_nested(&ctrl->reset_lock, ctrl->depth);
 909
 910	if (!ATTN_BUTTN(ctrl)) {
 911		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
 912		stat_mask |= PCI_EXP_SLTSTA_PDC;
 913	}
 914	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
 915	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
 916
 917	pcie_write_cmd(ctrl, 0, ctrl_mask);
 918	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 919		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 920
 921	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
 922
 923	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
 924	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
 925	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 926		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
 927
 928	up_write(&ctrl->reset_lock);
 929	return rc;
 930}
 931
 932int pcie_init_notification(struct controller *ctrl)
 933{
 934	if (pciehp_request_irq(ctrl))
 935		return -1;
 936	pcie_enable_notification(ctrl);
 937	ctrl->notification_enabled = 1;
 938	return 0;
 939}
 940
 941void pcie_shutdown_notification(struct controller *ctrl)
 942{
 943	if (ctrl->notification_enabled) {
 944		pcie_disable_notification(ctrl);
 945		pciehp_free_irq(ctrl);
 946		ctrl->notification_enabled = 0;
 947	}
 948}
 949
 950static inline void dbg_ctrl(struct controller *ctrl)
 951{
 952	struct pci_dev *pdev = ctrl->pcie->port;
 953	u16 reg16;
 954
 955	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
 956	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
 957	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
 958	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
 959	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
 960}
 961
 962#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
 963
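    /*
     * Count the hotplug bridges above this port.  The result is stored in
     * ctrl->depth and used as the lockdep subclass for the
     * down_read_nested()/down_write_nested() calls on reset_lock, so that
     * stacked hotplug ports (for example Thunderbolt daisy chains) do not
     * trigger false deadlock warnings.
     */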
 964static inline int pcie_hotplug_depth(struct pci_dev *dev)
 965{
 966	struct pci_bus *bus = dev->bus;
 967	int depth = 0;
 968
 969	while (bus->parent) {
 970		bus = bus->parent;
 971		if (bus->self && bus->self->is_hotplug_bridge)
 972			depth++;
 973	}
 974
 975	return depth;
 976}
 977
 978struct controller *pcie_init(struct pcie_device *dev)
 979{
 980	struct controller *ctrl;
 981	u32 slot_cap, slot_cap2;
 982	u8 poweron;
 983	struct pci_dev *pdev = dev->port;
 984	struct pci_bus *subordinate = pdev->subordinate;
 985
 986	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 987	if (!ctrl)
 988		return NULL;
 989
 990	ctrl->pcie = dev;
 991	ctrl->depth = pcie_hotplug_depth(dev->port);
 992	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
 993
 994	if (pdev->hotplug_user_indicators)
 995		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
 996
 997	/*
 998	 * We assume no Thunderbolt controllers support Command Complete events,
 999	 * but some controllers falsely claim they do.
1000	 */
1001	if (pdev->is_thunderbolt)
1002		slot_cap |= PCI_EXP_SLTCAP_NCCS;
1003
1004	ctrl->slot_cap = slot_cap;
1005	mutex_init(&ctrl->ctrl_lock);
1006	mutex_init(&ctrl->state_lock);
1007	init_rwsem(&ctrl->reset_lock);
1008	init_waitqueue_head(&ctrl->requester);
1009	init_waitqueue_head(&ctrl->queue);
1010	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
1011	dbg_ctrl(ctrl);
1012
1013	down_read(&pci_bus_sem);
1014	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
1015	up_read(&pci_bus_sem);
1016
1017	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
1018	if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
1019		pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
1020				      PCI_EXP_SLTCTL_IBPD_DISABLE);
1021		ctrl->inband_presence_disabled = 1;
1022	}
1023
1024	if (dmi_first_match(inband_presence_disabled_dmi_table))
1025		ctrl->inband_presence_disabled = 1;
1026
1027	/* Clear all remaining event bits in Slot Status register. */
1028	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
1029		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
1030		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
1031		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
1032
1033	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
1034		FIELD_GET(PCI_EXP_SLTCAP_PSN, slot_cap),
1035		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
1036		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
1037		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
1038		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
1039		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
1040		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
1041		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
1042		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
1043		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
1044		FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
1045		FLAG(pdev->link_active_reporting, true),
1046		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
1047
1048	/*
1049	 * If empty slot's power status is on, turn power off.  The IRQ isn't
1050	 * requested yet, so avoid triggering a notification with this command.
1051	 */
1052	if (POWER_CTRL(ctrl)) {
1053		pciehp_get_power_status(ctrl, &poweron);
1054		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
1055			pcie_disable_notification(ctrl);
1056			pciehp_power_off_slot(ctrl);
1057		}
1058	}
1059
1060	pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
1061	if (pdev)
1062		ctrl->dsn = pci_get_dsn(pdev);
1063	pci_dev_put(pdev);
1064
1065	return ctrl;
1066}
1067
1068void pciehp_release_ctrl(struct controller *ctrl)
1069{
1070	cancel_delayed_work_sync(&ctrl->button_work);
1071	kfree(ctrl);
1072}
1073
1074static void quirk_cmd_compl(struct pci_dev *pdev)
1075{
1076	u32 slot_cap;
1077
1078	if (pci_is_pcie(pdev)) {
1079		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
1080		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
1081		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
1082			pdev->broken_cmd_compl = 1;
1083	}
1084}
1085DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1086			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1087DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
1088			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1089DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
1090			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1091DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
1092			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1093DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
1094			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
1095DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
1096			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);