// SPDX-License-Identifier: GPL-2.0
/*
 * PLDA PCIe XpressRich host controller driver
 *
 * Copyright (C) 2023 Microchip Co. Ltd
 *		      StarFive Co. Ltd
 *
 * Author: Daire McNamara <daire.mcnamara@microchip.com>
 */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>
#include <linux/wordpart.h>

#include "pcie-plda.h"

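/*
 * plda_pcie_map_bus - map a config space access to its ECAM address
 *
 * Config space is laid out in standard ECAM format, so the address for
 * a given bus/device/function/register is computed with
 * PCIE_ECAM_OFFSET() relative to the mapped config window.
 */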
void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				int where)
{
	struct plda_pcie_rp *pcie = bus->sysdata;

	return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
EXPORT_SYMBOL_GPL(plda_pcie_map_bus);

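/*
 * Chained handler for the bridge's MSI interrupt: acknowledge the
 * aggregate MSI bit in ISTATUS_LOCAL, then demultiplex ISTATUS_MSI and
 * hand each pending vector to the inner MSI IRQ domain.
 */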
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

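/*
 * Bottom-level MSI irq_chip: ack clears the per-vector bit in
 * ISTATUS_MSI, and the composed message points the endpoint at the
 * bridge's MSI capture address with the hwirq number as payload.
 */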
static void plda_msi_bottom_irq_ack(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
};

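/*
 * Allocate one MSI vector from the bitmap of free vectors.  Allocation
 * is serialized by msi->lock; -ENOSPC is returned once all
 * msi->num_vectors vectors are in use.
 */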
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs,
				     void *args)
{
	struct plda_pcie_rp *port = domain->host_data;
	struct plda_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}

static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};

static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info plda_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
	.chip = &plda_msi_irq_chip,
};

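/*
 * Create the two-level MSI hierarchy: an inner linear domain that
 * tracks the hardware vectors and an outer PCI MSI domain stacked on
 * top of it.  The inner domain is torn down again if the outer one
 * cannot be created.
 */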
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct plda_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &plda_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

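/*
 * Chained handler for legacy INTx: extract the INTA-INTD bits from
 * ISTATUS_LOCAL and forward each asserted line to the INTx domain.
 */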
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plda_ack_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void plda_mask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void plda_unmask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};

static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};

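/*
 * Collapse the raw ISTATUS_LOCAL register into the driver's event
 * numbering: the four INTx bits fold into a single INTx event, the MSI
 * and system event bits are shifted down accordingly, and the low
 * event bits pass through unchanged.
 */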
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/* MSI event and system events */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* the remaining events map 1:1 to the register bits */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}

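/*
 * Default event handler: platform drivers that need real event
 * processing request their own handler via request_event_irq; this
 * stub simply reports the interrupt as handled.
 */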
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = port->event_ops->get_events(port);

	events &= port->events_bitmap;
	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}

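/*
 * Translate an event-domain hwirq back to its ISTATUS_LOCAL /
 * IMASK_LOCAL bitmask, undoing the folding done by plda_get_events().
 */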
static u32 plda_hwirq_to_mask(int hwirq)
{
	u32 mask;

	/* hwirqs 0 - 23 map 1:1 to the register bits */
	if (hwirq < EVENT_PM_MSI_INT_INTX)
		mask = BIT(hwirq);
	else if (hwirq == EVENT_PM_MSI_INT_INTX)
		mask = PM_MSI_INT_INTX_MASK;
	else
		mask = BIT(hwirq + PCI_NUM_INTX - 1);

	return mask;
}

static void plda_ack_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(plda_hwirq_to_mask(data->hwirq),
		       port->bridge_addr + ISTATUS_LOCAL);
}

static void plda_mask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static void plda_unmask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};

static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};

static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = (void *)domain->host_data;

	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};

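/*
 * Create the event and INTx IRQ domains on the bridge's child
 * interrupt-controller node, then the MSI domains.  The event domain
 * gets the DOMAIN_BUS_NEXUS token and the INTx domain DOMAIN_BUS_WIRED,
 * so the two domains can coexist on the same fwnode.
 */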
static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node,
						   port->num_events,
						   &plda_event_domain_ops,
						   port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return plda_allocate_msi_domains(port);
}

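/*
 * plda_init_interrupts - wire up all interrupt handling for the bridge
 *
 * Creates the IRQ domains, requests a handler for every event in
 * events_bitmap (the platform's request_event_irq callback takes
 * precedence over the default plda_event_handler), and finally plugs
 * the chained INTx, MSI and top-level event handlers.
 */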
int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int event_irq, ret;
	u32 i;

	if (!port->event_ops)
		port->event_ops = &plda_event_ops;

	if (!port->event_irq_chip)
		port->event_irq_chip = &plda_event_irq_chip;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return -ENODEV;

	for_each_set_bit(i, &port->events_bitmap, port->num_events) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	port->intx_irq = irq_create_mapping(port->event_domain,
					    event->intx_event);
	if (!port->intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);

	port->msi_irq = irq_create_mapping(port->event_domain,
					   event->msi_event);
	if (!port->msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);

	return 0;
}
EXPORT_SYMBOL_GPL(plda_init_interrupts);

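/*
 * plda_pcie_setup_window - program one outbound address translation entry
 *
 * Each ATR entry translates a power-of-two-sized AXI window onto the
 * PCIe bus.  The size is encoded as ilog2(size) - 1 in the source
 * address parameter register; window 0 is reserved for config space
 * accesses (PCIE_CONFIG_INTERFACE) while the others target memory
 * (PCIE_TX_RX_INTERFACE).
 */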
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
	val |= ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);

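/*
 * Configure inbound translation window 0: program its size field in
 * the source-address parameter register and anchor the window at PCIe
 * address 0.
 */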
void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 val;

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);

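/*
 * plda_pcie_setup_iomems - program outbound windows for all MEM resources
 *
 * Walks the host bridge's resource windows and sets up one translation
 * entry per memory window, starting at index 1 (index 0 is already
 * used for config space).
 */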
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
			   struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			plda_pcie_setup_window(bridge_base_addr, index,
					       entry->res->start, pci_addr,
					       resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);

static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);

	irq_domain_remove(pcie->msi.msi_domain);
	irq_domain_remove(pcie->msi.dev_domain);

	irq_domain_remove(pcie->intx_domain);
	irq_domain_remove(pcie->event_domain);
}

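/*
 * plda_pcie_host_init - common probe path for PLDA-based host drivers
 *
 * Maps the bridge ("apb") and config ("cfg") regions, runs the
 * platform's optional host_init hook, programs the outbound windows,
 * initializes interrupts and probes the PCI host bridge.  On failure,
 * everything set up so far is unwound.
 *
 * A platform driver's probe routine is expected to fill in port->dev,
 * port->num_events and friends before calling this, roughly (a
 * hypothetical sketch; the names below are not taken from any in-tree
 * driver):
 *
 *	port->dev = &pdev->dev;
 *	port->num_events = MY_NUM_PLDA_EVENTS;
 *	return plda_pcie_host_init(port, &my_pci_ops, &my_plda_event);
 */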
int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
			const struct plda_event *plda_event)
{
	struct device *dev = port->dev;
	struct pci_host_bridge *bridge;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *cfg_res;
	int ret;

	port->bridge_addr =
		devm_platform_ioremap_resource_byname(pdev, "apb");
	if (IS_ERR(port->bridge_addr))
		return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
				     "failed to map reg memory\n");

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!cfg_res)
		return dev_err_probe(dev, -ENODEV,
				     "failed to get config memory\n");

	port->config_base = devm_ioremap_resource(dev, cfg_res);
	if (IS_ERR(port->config_base))
		return dev_err_probe(dev, PTR_ERR(port->config_base),
				     "failed to map config memory\n");

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return dev_err_probe(dev, -ENOMEM,
				     "failed to alloc bridge\n");

	if (port->host_ops && port->host_ops->host_init) {
		ret = port->host_ops->host_init(port);
		if (ret)
			return ret;
	}

	port->bridge = bridge;
	plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
			       resource_size(cfg_res));
	plda_pcie_setup_iomems(bridge, port);
	plda_set_default_msi(&port->msi);
	ret = plda_init_interrupts(pdev, port, plda_event);
	if (ret)
		goto err_host;

	/* Set default bus ops */
	bridge->ops = ops;
	bridge->sysdata = port;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		dev_err_probe(dev, ret, "failed to probe pci host\n");
		goto err_probe;
	}

	return ret;

err_probe:
	plda_pcie_irq_domain_deinit(port);
err_host:
	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);

	return ret;
}
EXPORT_SYMBOL_GPL(plda_pcie_host_init);

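/*
 * plda_pcie_host_deinit - tear down a host bridge set up by
 * plda_pcie_host_init(): stop and remove the root bus, release the IRQ
 * domains and run the platform's optional host_deinit hook.
 */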
void plda_pcie_host_deinit(struct plda_pcie_rp *port)
{
	pci_stop_root_bus(port->bridge->bus);
	pci_remove_root_bus(port->bridge->bus);

	plda_pcie_irq_domain_deinit(port);

	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);
}
EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);