drivers/bcma/main.c (Broadcom specific AMBA bus core), Linux v6.8:
  1/*
  2 * Broadcom specific AMBA
  3 * Bus subsystem
  4 *
  5 * Licensed under the GNU/GPL. See COPYING for details.
  6 */
  7
  8#include "bcma_private.h"
  9#include <linux/module.h>
 10#include <linux/mmc/sdio_func.h>
 11#include <linux/platform_device.h>
 12#include <linux/pci.h>
 13#include <linux/bcma/bcma.h>
 14#include <linux/slab.h>
 15#include <linux/of_address.h>
 16#include <linux/of_irq.h>
 17#include <linux/of_device.h>
 18#include <linux/of_platform.h>
 19
 20MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 21MODULE_LICENSE("GPL");
 22
 23/* contains the number the next bus should get. */
 24static unsigned int bcma_bus_next_num;
 25
 26/* bcma_buses_mutex locks the bcma_bus_next_num */
 27static DEFINE_MUTEX(bcma_buses_mutex);
 28
 29static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 30static int bcma_device_probe(struct device *dev);
 31static void bcma_device_remove(struct device *dev);
 32static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
 33
 34static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
 35{
 36	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 37	return sprintf(buf, "0x%03X\n", core->id.manuf);
 38}
 39static DEVICE_ATTR_RO(manuf);
 40
 41static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
 42{
 43	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 44	return sprintf(buf, "0x%03X\n", core->id.id);
 45}
 46static DEVICE_ATTR_RO(id);
 47
 48static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
 49{
 50	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 51	return sprintf(buf, "0x%02X\n", core->id.rev);
 52}
 53static DEVICE_ATTR_RO(rev);
 54
 55static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
 56{
 57	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 58	return sprintf(buf, "0x%X\n", core->id.class);
 59}
 60static DEVICE_ATTR_RO(class);
 61
 62static struct attribute *bcma_device_attrs[] = {
 63	&dev_attr_manuf.attr,
 64	&dev_attr_id.attr,
 65	&dev_attr_rev.attr,
 66	&dev_attr_class.attr,
 67	NULL,
 68};
 69ATTRIBUTE_GROUPS(bcma_device);
 70
 71static struct bus_type bcma_bus_type = {
 72	.name		= "bcma",
 73	.match		= bcma_bus_match,
 74	.probe		= bcma_device_probe,
 75	.remove		= bcma_device_remove,
 76	.uevent		= bcma_device_uevent,
 77	.dev_groups	= bcma_device_groups,
 78};
 79
 80static u16 bcma_cc_core_id(struct bcma_bus *bus)
 81{
 82	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
 83		return BCMA_CORE_4706_CHIPCOMMON;
 84	return BCMA_CORE_CHIPCOMMON;
 85}
 86
 87struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
 88					u8 unit)
 89{
 90	struct bcma_device *core;
 91
 92	list_for_each_entry(core, &bus->cores, list) {
 93		if (core->id.id == coreid && core->core_unit == unit)
 94			return core;
 95	}
 96	return NULL;
 97}
 98EXPORT_SYMBOL_GPL(bcma_find_core_unit);
 99
100bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
101		     int timeout)
102{
103	unsigned long deadline = jiffies + timeout;
104	u32 val;
105
106	do {
107		val = bcma_read32(core, reg);
108		if ((val & mask) == value)
109			return true;
110		cpu_relax();
111		udelay(10);
112	} while (!time_after_eq(jiffies, deadline));
113
114	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
115
116	return false;
117}
118
119static void bcma_release_core_dev(struct device *dev)
120{
121	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
122	if (core->io_addr)
123		iounmap(core->io_addr);
124	if (core->io_wrap)
125		iounmap(core->io_wrap);
126	kfree(core);
127}
128
129static bool bcma_is_core_needed_early(u16 core_id)
130{
131	switch (core_id) {
132	case BCMA_CORE_NS_NAND:
133	case BCMA_CORE_NS_QSPI:
134		return true;
135	}
136
137	return false;
138}
139
140static struct device_node *bcma_of_find_child_device(struct device *parent,
141						     struct bcma_device *core)
142{
143	struct device_node *node;
144	int ret;
145
146	if (!parent->of_node)
147		return NULL;
148
149	for_each_child_of_node(parent->of_node, node) {
150		struct resource res;
151		ret = of_address_to_resource(node, 0, &res);
152		if (ret)
153			continue;
154		if (res.start == core->addr)
155			return node;
156	}
157	return NULL;
158}
159
160static int bcma_of_irq_parse(struct device *parent,
161			     struct bcma_device *core,
162			     struct of_phandle_args *out_irq, int num)
163{
164	__be32 laddr[1];
165	int rc;
166
167	if (core->dev.of_node) {
168		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
169		if (!rc)
170			return rc;
171	}
172
173	out_irq->np = parent->of_node;
174	out_irq->args_count = 1;
175	out_irq->args[0] = num;
176
177	laddr[0] = cpu_to_be32(core->addr);
178	return of_irq_parse_raw(laddr, out_irq);
179}
180
181static unsigned int bcma_of_get_irq(struct device *parent,
182				    struct bcma_device *core, int num)
183{
184	struct of_phandle_args out_irq;
185	int ret;
186
187	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
188		return 0;
189
190	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
191	if (ret) {
192		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
193			   ret);
194		return 0;
195	}
196
197	return irq_create_of_mapping(&out_irq);
198}
199
200static void bcma_of_fill_device(struct device *parent,
201				struct bcma_device *core)
202{
203	struct device_node *node;
204
205	node = bcma_of_find_child_device(parent, core);
206	if (node)
207		core->dev.of_node = node;
208
209	core->irq = bcma_of_get_irq(parent, core, 0);
210
211	of_dma_configure(&core->dev, node, false);
212}
213
214unsigned int bcma_core_irq(struct bcma_device *core, int num)
215{
216	struct bcma_bus *bus = core->bus;
217	unsigned int mips_irq;
218
219	switch (bus->hosttype) {
220	case BCMA_HOSTTYPE_PCI:
221		return bus->host_pci->irq;
222	case BCMA_HOSTTYPE_SOC:
223		if (bus->drv_mips.core && num == 0) {
224			mips_irq = bcma_core_mips_irq(core);
225			return mips_irq <= 4 ? mips_irq + 2 : 0;
226		}
227		if (bus->dev)
228			return bcma_of_get_irq(bus->dev, core, num);
229		return 0;
230	case BCMA_HOSTTYPE_SDIO:
231		return 0;
232	}
233
234	return 0;
235}
236EXPORT_SYMBOL(bcma_core_irq);
237
238void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
239{
240	device_initialize(&core->dev);
241	core->dev.release = bcma_release_core_dev;
242	core->dev.bus = &bcma_bus_type;
243	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
244	core->dev.parent = bus->dev;
245	if (bus->dev)
246		bcma_of_fill_device(bus->dev, core);
247
248	switch (bus->hosttype) {
249	case BCMA_HOSTTYPE_PCI:
250		core->dma_dev = bus->dev;
251		core->irq = bus->host_pci->irq;
252		break;
253	case BCMA_HOSTTYPE_SOC:
254		if (IS_ENABLED(CONFIG_OF) && bus->dev) {
255			core->dma_dev = bus->dev;
256		} else {
257			core->dev.dma_mask = &core->dev.coherent_dma_mask;
258			core->dma_dev = &core->dev;
259		}
260		break;
261	case BCMA_HOSTTYPE_SDIO:
262		break;
263	}
264}
265
266void bcma_init_bus(struct bcma_bus *bus)
267{
268	mutex_lock(&bcma_buses_mutex);
269	bus->num = bcma_bus_next_num++;
270	mutex_unlock(&bcma_buses_mutex);
271
272	INIT_LIST_HEAD(&bus->cores);
273	bus->nr_cores = 0;
274
275	bcma_detect_chip(bus);
276}
277
278static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
279{
280	int err;
281
282	err = device_add(&core->dev);
283	if (err) {
284		bcma_err(bus, "Could not register dev for core 0x%03X\n",
285			 core->id.id);
286		return;
287	}
288	core->dev_registered = true;
289}
290
291static int bcma_register_devices(struct bcma_bus *bus)
292{
293	struct bcma_device *core;
294	int err;
295
296	list_for_each_entry(core, &bus->cores, list) {
297		/* We support that core ourselves */
298		switch (core->id.id) {
299		case BCMA_CORE_4706_CHIPCOMMON:
300		case BCMA_CORE_CHIPCOMMON:
301		case BCMA_CORE_NS_CHIPCOMMON_B:
302		case BCMA_CORE_PCI:
303		case BCMA_CORE_PCIE:
304		case BCMA_CORE_PCIE2:
305		case BCMA_CORE_MIPS_74K:
306		case BCMA_CORE_4706_MAC_GBIT_COMMON:
307			continue;
308		}
309
310		/* Early cores were already registered */
311		if (bcma_is_core_needed_early(core->id.id))
312			continue;
313
314		/* Only first GMAC core on BCM4706 is connected and working */
315		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
316		    core->core_unit > 0)
317			continue;
318
319		bcma_register_core(bus, core);
320	}
321
322#ifdef CONFIG_BCMA_PFLASH
323	if (bus->drv_cc.pflash.present) {
324		err = platform_device_register(&bcma_pflash_dev);
325		if (err)
326			bcma_err(bus, "Error registering parallel flash\n");
327	}
328#endif
329
330#ifdef CONFIG_BCMA_SFLASH
331	if (bus->drv_cc.sflash.present) {
332		err = platform_device_register(&bcma_sflash_dev);
333		if (err)
334			bcma_err(bus, "Error registering serial flash\n");
335	}
336#endif
337
338#ifdef CONFIG_BCMA_NFLASH
339	if (bus->drv_cc.nflash.present) {
340		err = platform_device_register(&bcma_nflash_dev);
341		if (err)
342			bcma_err(bus, "Error registering NAND flash\n");
343	}
344#endif
345	err = bcma_gpio_init(&bus->drv_cc);
346	if (err == -ENOTSUPP)
347		bcma_debug(bus, "GPIO driver not activated\n");
348	else if (err) {
349		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
350		return err;
351	}
352
353	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
354		err = bcma_chipco_watchdog_register(&bus->drv_cc);
355		if (err)
356			bcma_err(bus, "Error registering watchdog driver\n");
357	}
358
359	return 0;
360}
361
362void bcma_unregister_cores(struct bcma_bus *bus)
363{
364	struct bcma_device *core, *tmp;
365
366	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
367		if (!core->dev_registered)
368			continue;
369		list_del(&core->list);
370		device_unregister(&core->dev);
371	}
372	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
373		platform_device_unregister(bus->drv_cc.watchdog);
374
375	/* Now no one uses internally-handled cores, we can free them */
376	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
377		list_del(&core->list);
378		put_device(&core->dev);
379	}
380}
381
382int bcma_bus_register(struct bcma_bus *bus)
383{
384	int err;
385	struct bcma_device *core;
386
387	/* Scan for devices (cores) */
388	err = bcma_bus_scan(bus);
389	if (err) {
390		bcma_err(bus, "Failed to scan: %d\n", err);
391		return err;
392	}
393
394	/* Early init CC core */
395	core = bcma_find_core(bus, bcma_cc_core_id(bus));
396	if (core) {
397		bus->drv_cc.core = core;
398		bcma_core_chipcommon_early_init(&bus->drv_cc);
399	}
400
401	/* Early init PCIE core */
402	core = bcma_find_core(bus, BCMA_CORE_PCIE);
403	if (core) {
404		bus->drv_pci[0].core = core;
405		bcma_core_pci_early_init(&bus->drv_pci[0]);
406	}
407
408	if (bus->dev)
409		of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);
410
411	/* Cores providing flash access go before SPROM init */
412	list_for_each_entry(core, &bus->cores, list) {
413		if (bcma_is_core_needed_early(core->id.id))
414			bcma_register_core(bus, core);
415	}
416
417	/* Try to get SPROM */
418	err = bcma_sprom_get(bus);
419	if (err == -ENOENT) {
420		bcma_err(bus, "No SPROM available\n");
421	} else if (err)
422		bcma_err(bus, "Failed to get SPROM: %d\n", err);
423
424	/* Init CC core */
425	core = bcma_find_core(bus, bcma_cc_core_id(bus));
426	if (core) {
427		bus->drv_cc.core = core;
428		bcma_core_chipcommon_init(&bus->drv_cc);
429	}
430
431	/* Init CC core */
432	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
433	if (core) {
434		bus->drv_cc_b.core = core;
435		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
436	}
437
438	/* Init MIPS core */
439	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
440	if (core) {
441		bus->drv_mips.core = core;
442		bcma_core_mips_init(&bus->drv_mips);
443	}
444
445	/* Init PCIE core */
446	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
447	if (core) {
448		bus->drv_pci[0].core = core;
449		bcma_core_pci_init(&bus->drv_pci[0]);
450	}
451
452	/* Init PCIE core */
453	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
454	if (core) {
455		bus->drv_pci[1].core = core;
456		bcma_core_pci_init(&bus->drv_pci[1]);
457	}
458
459	/* Init PCIe Gen 2 core */
460	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
461	if (core) {
462		bus->drv_pcie2.core = core;
463		bcma_core_pcie2_init(&bus->drv_pcie2);
464	}
465
466	/* Init GBIT MAC COMMON core */
467	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
468	if (core) {
469		bus->drv_gmac_cmn.core = core;
470		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
471	}
472
473	/* Register found cores */
474	bcma_register_devices(bus);
475
476	bcma_info(bus, "Bus registered\n");
477
478	return 0;
479}
480
481void bcma_bus_unregister(struct bcma_bus *bus)
482{
483	int err;
484
485	err = bcma_gpio_unregister(&bus->drv_cc);
486	if (err == -EBUSY)
487		bcma_err(bus, "Some GPIOs are still in use.\n");
488	else if (err)
489		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
490
491	bcma_core_chipcommon_b_free(&bus->drv_cc_b);
492
493	bcma_unregister_cores(bus);
494}
495
496/*
497 * This is a special version of bus registration function designed for SoCs.
498 * It scans bus and performs basic initialization of main cores only.
499 * Please note it requires memory allocation, however it won't try to sleep.
500 */
501int __init bcma_bus_early_register(struct bcma_bus *bus)
502{
503	int err;
504	struct bcma_device *core;
505
506	/* Scan for devices (cores) */
507	err = bcma_bus_scan(bus);
508	if (err) {
509		bcma_err(bus, "Failed to scan bus: %d\n", err);
510		return -1;
511	}
512
513	/* Early init CC core */
514	core = bcma_find_core(bus, bcma_cc_core_id(bus));
515	if (core) {
516		bus->drv_cc.core = core;
517		bcma_core_chipcommon_early_init(&bus->drv_cc);
518	}
519
520	/* Early init MIPS core */
521	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
522	if (core) {
523		bus->drv_mips.core = core;
524		bcma_core_mips_early_init(&bus->drv_mips);
525	}
526
527	bcma_info(bus, "Early bus registered\n");
528
529	return 0;
530}
531
532#ifdef CONFIG_PM
533int bcma_bus_suspend(struct bcma_bus *bus)
534{
535	struct bcma_device *core;
536
537	list_for_each_entry(core, &bus->cores, list) {
538		struct device_driver *drv = core->dev.driver;
539		if (drv) {
540			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
541			if (adrv->suspend)
542				adrv->suspend(core);
543		}
544	}
545	return 0;
546}
547
548int bcma_bus_resume(struct bcma_bus *bus)
549{
550	struct bcma_device *core;
551
552	/* Init CC core */
553	if (bus->drv_cc.core) {
554		bus->drv_cc.setup_done = false;
555		bcma_core_chipcommon_init(&bus->drv_cc);
556	}
557
558	list_for_each_entry(core, &bus->cores, list) {
559		struct device_driver *drv = core->dev.driver;
560		if (drv) {
561			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
562			if (adrv->resume)
563				adrv->resume(core);
564		}
565	}
566
567	return 0;
568}
569#endif
570
571int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
572{
573	drv->drv.name = drv->name;
574	drv->drv.bus = &bcma_bus_type;
575	drv->drv.owner = owner;
576
577	return driver_register(&drv->drv);
578}
579EXPORT_SYMBOL_GPL(__bcma_driver_register);
580
581void bcma_driver_unregister(struct bcma_driver *drv)
582{
583	driver_unregister(&drv->drv);
584}
585EXPORT_SYMBOL_GPL(bcma_driver_unregister);
586
587static int bcma_bus_match(struct device *dev, struct device_driver *drv)
588{
589	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
590	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
591	const struct bcma_device_id *cid = &core->id;
592	const struct bcma_device_id *did;
593
594	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
595	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
596		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
597		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
598		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
599			return 1;
600	}
601	return 0;
602}
603
604static int bcma_device_probe(struct device *dev)
605{
606	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
607	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
608					       drv);
609	int err = 0;
610
611	get_device(dev);
612	if (adrv->probe)
613		err = adrv->probe(core);
614	if (err)
615		put_device(dev);
616
617	return err;
618}
619
620static void bcma_device_remove(struct device *dev)
621{
622	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
623	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
624					       drv);
625
626	if (adrv->remove)
627		adrv->remove(core);
628	put_device(dev);
629}
630
631static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
632{
633	const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev);
634
635	return add_uevent_var(env,
636			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
637			      core->id.manuf, core->id.id,
638			      core->id.rev, core->id.class);
639}
640
641static unsigned int bcma_bus_registered;
642
643/*
644 * If built-in, bus has to be registered early, before any driver calls
645 * bcma_driver_register.
646 * Otherwise registering driver would trigger BUG in driver_register.
647 */
648static int __init bcma_init_bus_register(void)
649{
650	int err;
651
652	if (bcma_bus_registered)
653		return 0;
654
655	err = bus_register(&bcma_bus_type);
656	if (!err)
657		bcma_bus_registered = 1;
658
659	return err;
660}
661#ifndef MODULE
662fs_initcall(bcma_init_bus_register);
663#endif
664
665/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
666static int __init bcma_modinit(void)
667{
668	int err;
669
670	err = bcma_init_bus_register();
671	if (err)
672		return err;
673
674	err = bcma_host_soc_register_driver();
675	if (err) {
676		pr_err("SoC host initialization failed\n");
677		err = 0;
678	}
679#ifdef CONFIG_BCMA_HOST_PCI
680	err = bcma_host_pci_init();
681	if (err) {
682		pr_err("PCI host initialization failed\n");
683		err = 0;
684	}
685#endif
686
687	return err;
688}
689module_init(bcma_modinit);
690
691static void __exit bcma_modexit(void)
692{
693#ifdef CONFIG_BCMA_HOST_PCI
694	bcma_host_pci_exit();
695#endif
696	bcma_host_soc_unregister_driver();
697	bus_unregister(&bcma_bus_type);
698}
699module_exit(bcma_modexit)
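
For context, the bus_type callbacks above (bcma_bus_match(), bcma_device_probe(), bcma_device_remove()) are what a client driver plugs into. Below is a minimal, illustrative sketch of such a driver, not taken from the kernel tree: the demo_* names and the register offset are placeholders, and the zeroed table terminator follows the all-zero check in bcma_bus_match().

/*
 * Illustrative bcma client driver sketch (not from the kernel tree).
 * bcma_bus_match() walks id_table until an entry with manuf, id and rev
 * all zero, so the table ends with a zeroed sentinel; the BCMA_ANY_*
 * values act as wildcards.
 */
#include <linux/module.h>
#include <linux/bcma/bcma.h>

static const struct bcma_device_id demo_bcma_tbl[] = {
	{ .manuf = BCMA_ANY_MANUF, .id = BCMA_CORE_CHIPCOMMON,
	  .rev = BCMA_ANY_REV, .class = BCMA_ANY_CLASS },
	{ 0, },		/* sentinel: stops the bcma_bus_match() loop */
};
MODULE_DEVICE_TABLE(bcma, demo_bcma_tbl);

static int demo_probe(struct bcma_device *core)
{
	u32 val = bcma_read32(core, 0x0);	/* placeholder offset */

	dev_info(&core->dev, "core 0x%03X bound, reg0=0x%08x\n",
		 core->id.id, val);
	return 0;	/* non-zero would make bcma_device_probe() drop the ref */
}

static void demo_remove(struct bcma_device *core)
{
	/* undo whatever demo_probe() set up */
}

static struct bcma_driver demo_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= demo_bcma_tbl,
	.probe		= demo_probe,
	.remove		= demo_remove,
};

static int __init demo_init(void)
{
	/* bcma_driver_register() expands to __bcma_driver_register(drv, THIS_MODULE) */
	return bcma_driver_register(&demo_driver);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	bcma_driver_unregister(&demo_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
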
drivers/bcma/main.c, Linux v4.17:
  1/*
  2 * Broadcom specific AMBA
  3 * Bus subsystem
  4 *
  5 * Licensed under the GNU/GPL. See COPYING for details.
  6 */
  7
  8#include "bcma_private.h"
  9#include <linux/module.h>
 10#include <linux/mmc/sdio_func.h>
 11#include <linux/platform_device.h>
 12#include <linux/pci.h>
 13#include <linux/bcma/bcma.h>
 14#include <linux/slab.h>
 15#include <linux/of_address.h>
 16#include <linux/of_irq.h>
 17#include <linux/of_platform.h>
 18
 19MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 20MODULE_LICENSE("GPL");
 21
 22/* contains the number the next bus should get. */
 23static unsigned int bcma_bus_next_num = 0;
 24
 25/* bcma_buses_mutex locks the bcma_bus_next_num */
 26static DEFINE_MUTEX(bcma_buses_mutex);
 27
 28static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 29static int bcma_device_probe(struct device *dev);
 30static int bcma_device_remove(struct device *dev);
 31static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 32
 33static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
 34{
 35	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 36	return sprintf(buf, "0x%03X\n", core->id.manuf);
 37}
 38static DEVICE_ATTR_RO(manuf);
 39
 40static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
 41{
 42	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 43	return sprintf(buf, "0x%03X\n", core->id.id);
 44}
 45static DEVICE_ATTR_RO(id);
 46
 47static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
 48{
 49	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 50	return sprintf(buf, "0x%02X\n", core->id.rev);
 51}
 52static DEVICE_ATTR_RO(rev);
 53
 54static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
 55{
 56	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 57	return sprintf(buf, "0x%X\n", core->id.class);
 58}
 59static DEVICE_ATTR_RO(class);
 60
 61static struct attribute *bcma_device_attrs[] = {
 62	&dev_attr_manuf.attr,
 63	&dev_attr_id.attr,
 64	&dev_attr_rev.attr,
 65	&dev_attr_class.attr,
 66	NULL,
 67};
 68ATTRIBUTE_GROUPS(bcma_device);
 69
 70static struct bus_type bcma_bus_type = {
 71	.name		= "bcma",
 72	.match		= bcma_bus_match,
 73	.probe		= bcma_device_probe,
 74	.remove		= bcma_device_remove,
 75	.uevent		= bcma_device_uevent,
 76	.dev_groups	= bcma_device_groups,
 77};
 78
 79static u16 bcma_cc_core_id(struct bcma_bus *bus)
 80{
 81	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
 82		return BCMA_CORE_4706_CHIPCOMMON;
 83	return BCMA_CORE_CHIPCOMMON;
 84}
 85
 86struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
 87					u8 unit)
 88{
 89	struct bcma_device *core;
 90
 91	list_for_each_entry(core, &bus->cores, list) {
 92		if (core->id.id == coreid && core->core_unit == unit)
 93			return core;
 94	}
 95	return NULL;
 96}
 97EXPORT_SYMBOL_GPL(bcma_find_core_unit);
 98
 99bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
100		     int timeout)
101{
102	unsigned long deadline = jiffies + timeout;
103	u32 val;
104
105	do {
106		val = bcma_read32(core, reg);
107		if ((val & mask) == value)
108			return true;
109		cpu_relax();
110		udelay(10);
111	} while (!time_after_eq(jiffies, deadline));
112
113	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
114
115	return false;
116}
117
118static void bcma_release_core_dev(struct device *dev)
119{
120	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
121	if (core->io_addr)
122		iounmap(core->io_addr);
123	if (core->io_wrap)
124		iounmap(core->io_wrap);
125	kfree(core);
126}
127
128static bool bcma_is_core_needed_early(u16 core_id)
129{
130	switch (core_id) {
131	case BCMA_CORE_NS_NAND:
132	case BCMA_CORE_NS_QSPI:
133		return true;
134	}
135
136	return false;
137}
138
139static struct device_node *bcma_of_find_child_device(struct device *parent,
140						     struct bcma_device *core)
141{
142	struct device_node *node;
143	u64 size;
144	const __be32 *reg;
145
146	if (!parent->of_node)
147		return NULL;
148
149	for_each_child_of_node(parent->of_node, node) {
150		reg = of_get_address(node, 0, &size, NULL);
151		if (!reg)
152			continue;
153		if (of_translate_address(node, reg) == core->addr)
154			return node;
155	}
156	return NULL;
157}
158
159static int bcma_of_irq_parse(struct device *parent,
160			     struct bcma_device *core,
161			     struct of_phandle_args *out_irq, int num)
162{
163	__be32 laddr[1];
164	int rc;
165
166	if (core->dev.of_node) {
167		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
168		if (!rc)
169			return rc;
170	}
171
172	out_irq->np = parent->of_node;
173	out_irq->args_count = 1;
174	out_irq->args[0] = num;
175
176	laddr[0] = cpu_to_be32(core->addr);
177	return of_irq_parse_raw(laddr, out_irq);
178}
179
180static unsigned int bcma_of_get_irq(struct device *parent,
181				    struct bcma_device *core, int num)
182{
183	struct of_phandle_args out_irq;
184	int ret;
185
186	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
187		return 0;
188
189	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
190	if (ret) {
191		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
192			   ret);
193		return 0;
194	}
195
196	return irq_create_of_mapping(&out_irq);
197}
198
199static void bcma_of_fill_device(struct device *parent,
200				struct bcma_device *core)
201{
202	struct device_node *node;
203
204	node = bcma_of_find_child_device(parent, core);
205	if (node)
206		core->dev.of_node = node;
207
208	core->irq = bcma_of_get_irq(parent, core, 0);
209
210	of_dma_configure(&core->dev, node);
211}
212
213unsigned int bcma_core_irq(struct bcma_device *core, int num)
214{
215	struct bcma_bus *bus = core->bus;
216	unsigned int mips_irq;
217
218	switch (bus->hosttype) {
219	case BCMA_HOSTTYPE_PCI:
220		return bus->host_pci->irq;
221	case BCMA_HOSTTYPE_SOC:
222		if (bus->drv_mips.core && num == 0) {
223			mips_irq = bcma_core_mips_irq(core);
224			return mips_irq <= 4 ? mips_irq + 2 : 0;
225		}
226		if (bus->host_pdev)
227			return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
228		return 0;
229	case BCMA_HOSTTYPE_SDIO:
230		return 0;
231	}
232
233	return 0;
234}
235EXPORT_SYMBOL(bcma_core_irq);
236
237void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
238{
239	core->dev.release = bcma_release_core_dev;
240	core->dev.bus = &bcma_bus_type;
241	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
242	core->dev.parent = bcma_bus_get_host_dev(bus);
243	if (core->dev.parent)
244		bcma_of_fill_device(core->dev.parent, core);
245
246	switch (bus->hosttype) {
247	case BCMA_HOSTTYPE_PCI:
248		core->dma_dev = &bus->host_pci->dev;
249		core->irq = bus->host_pci->irq;
250		break;
251	case BCMA_HOSTTYPE_SOC:
252		if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
253			core->dma_dev = &bus->host_pdev->dev;
254		} else {
255			core->dev.dma_mask = &core->dev.coherent_dma_mask;
256			core->dma_dev = &core->dev;
257		}
258		break;
259	case BCMA_HOSTTYPE_SDIO:
260		break;
261	}
262}
263
264struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
265{
266	switch (bus->hosttype) {
267	case BCMA_HOSTTYPE_PCI:
268		if (bus->host_pci)
269			return &bus->host_pci->dev;
270		else
271			return NULL;
272	case BCMA_HOSTTYPE_SOC:
273		if (bus->host_pdev)
274			return &bus->host_pdev->dev;
275		else
276			return NULL;
277	case BCMA_HOSTTYPE_SDIO:
278		if (bus->host_sdio)
279			return &bus->host_sdio->dev;
280		else
281			return NULL;
282	}
283	return NULL;
284}
285
286void bcma_init_bus(struct bcma_bus *bus)
287{
288	mutex_lock(&bcma_buses_mutex);
289	bus->num = bcma_bus_next_num++;
290	mutex_unlock(&bcma_buses_mutex);
291
292	INIT_LIST_HEAD(&bus->cores);
293	bus->nr_cores = 0;
294
295	bcma_detect_chip(bus);
296}
297
298static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
299{
300	int err;
301
302	err = device_register(&core->dev);
303	if (err) {
304		bcma_err(bus, "Could not register dev for core 0x%03X\n",
305			 core->id.id);
306		put_device(&core->dev);
307		return;
308	}
309	core->dev_registered = true;
310}
311
312static int bcma_register_devices(struct bcma_bus *bus)
313{
314	struct bcma_device *core;
315	int err;
316
317	list_for_each_entry(core, &bus->cores, list) {
318		/* We support that core ourselves */
319		switch (core->id.id) {
320		case BCMA_CORE_4706_CHIPCOMMON:
321		case BCMA_CORE_CHIPCOMMON:
322		case BCMA_CORE_NS_CHIPCOMMON_B:
323		case BCMA_CORE_PCI:
324		case BCMA_CORE_PCIE:
325		case BCMA_CORE_PCIE2:
326		case BCMA_CORE_MIPS_74K:
327		case BCMA_CORE_4706_MAC_GBIT_COMMON:
328			continue;
329		}
330
331		/* Early cores were already registered */
332		if (bcma_is_core_needed_early(core->id.id))
333			continue;
334
335		/* Only first GMAC core on BCM4706 is connected and working */
336		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
337		    core->core_unit > 0)
338			continue;
339
340		bcma_register_core(bus, core);
341	}
342
343#ifdef CONFIG_BCMA_PFLASH
344	if (bus->drv_cc.pflash.present) {
345		err = platform_device_register(&bcma_pflash_dev);
346		if (err)
347			bcma_err(bus, "Error registering parallel flash\n");
348	}
349#endif
350
351#ifdef CONFIG_BCMA_SFLASH
352	if (bus->drv_cc.sflash.present) {
353		err = platform_device_register(&bcma_sflash_dev);
354		if (err)
355			bcma_err(bus, "Error registering serial flash\n");
356	}
357#endif
358
359#ifdef CONFIG_BCMA_NFLASH
360	if (bus->drv_cc.nflash.present) {
361		err = platform_device_register(&bcma_nflash_dev);
362		if (err)
363			bcma_err(bus, "Error registering NAND flash\n");
364	}
365#endif
366	err = bcma_gpio_init(&bus->drv_cc);
367	if (err == -ENOTSUPP)
368		bcma_debug(bus, "GPIO driver not activated\n");
369	else if (err)
370		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
371
372	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
373		err = bcma_chipco_watchdog_register(&bus->drv_cc);
374		if (err)
375			bcma_err(bus, "Error registering watchdog driver\n");
376	}
377
378	return 0;
379}
380
381void bcma_unregister_cores(struct bcma_bus *bus)
382{
383	struct bcma_device *core, *tmp;
384
385	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
386		if (!core->dev_registered)
387			continue;
388		list_del(&core->list);
389		device_unregister(&core->dev);
390	}
391	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
392		platform_device_unregister(bus->drv_cc.watchdog);
393
394	/* Now no one uses internally-handled cores, we can free them */
395	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
396		list_del(&core->list);
397		kfree(core);
398	}
399}
400
401int bcma_bus_register(struct bcma_bus *bus)
402{
403	int err;
404	struct bcma_device *core;
405	struct device *dev;
406
407	/* Scan for devices (cores) */
408	err = bcma_bus_scan(bus);
409	if (err) {
410		bcma_err(bus, "Failed to scan: %d\n", err);
411		return err;
412	}
413
414	/* Early init CC core */
415	core = bcma_find_core(bus, bcma_cc_core_id(bus));
416	if (core) {
417		bus->drv_cc.core = core;
418		bcma_core_chipcommon_early_init(&bus->drv_cc);
419	}
420
421	/* Early init PCIE core */
422	core = bcma_find_core(bus, BCMA_CORE_PCIE);
423	if (core) {
424		bus->drv_pci[0].core = core;
425		bcma_core_pci_early_init(&bus->drv_pci[0]);
426	}
427
428	dev = bcma_bus_get_host_dev(bus);
429	if (dev) {
430		of_platform_default_populate(dev->of_node, NULL, dev);
431	}
432
433	/* Cores providing flash access go before SPROM init */
434	list_for_each_entry(core, &bus->cores, list) {
435		if (bcma_is_core_needed_early(core->id.id))
436			bcma_register_core(bus, core);
437	}
438
439	/* Try to get SPROM */
440	err = bcma_sprom_get(bus);
441	if (err == -ENOENT) {
442		bcma_err(bus, "No SPROM available\n");
443	} else if (err)
444		bcma_err(bus, "Failed to get SPROM: %d\n", err);
445
446	/* Init CC core */
447	core = bcma_find_core(bus, bcma_cc_core_id(bus));
448	if (core) {
449		bus->drv_cc.core = core;
450		bcma_core_chipcommon_init(&bus->drv_cc);
451	}
452
453	/* Init CC core */
454	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
455	if (core) {
456		bus->drv_cc_b.core = core;
457		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
458	}
459
460	/* Init MIPS core */
461	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
462	if (core) {
463		bus->drv_mips.core = core;
464		bcma_core_mips_init(&bus->drv_mips);
465	}
466
467	/* Init PCIE core */
468	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
469	if (core) {
470		bus->drv_pci[0].core = core;
471		bcma_core_pci_init(&bus->drv_pci[0]);
472	}
473
474	/* Init PCIE core */
475	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
476	if (core) {
477		bus->drv_pci[1].core = core;
478		bcma_core_pci_init(&bus->drv_pci[1]);
479	}
480
481	/* Init PCIe Gen 2 core */
482	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
483	if (core) {
484		bus->drv_pcie2.core = core;
485		bcma_core_pcie2_init(&bus->drv_pcie2);
486	}
487
488	/* Init GBIT MAC COMMON core */
489	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
490	if (core) {
491		bus->drv_gmac_cmn.core = core;
492		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
493	}
494
495	/* Register found cores */
496	bcma_register_devices(bus);
497
498	bcma_info(bus, "Bus registered\n");
499
500	return 0;
501}
502
503void bcma_bus_unregister(struct bcma_bus *bus)
504{
505	int err;
506
507	err = bcma_gpio_unregister(&bus->drv_cc);
508	if (err == -EBUSY)
509		bcma_err(bus, "Some GPIOs are still in use.\n");
510	else if (err)
511		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
512
513	bcma_core_chipcommon_b_free(&bus->drv_cc_b);
514
515	bcma_unregister_cores(bus);
516}
517
518/*
519 * This is a special version of bus registration function designed for SoCs.
520 * It scans bus and performs basic initialization of main cores only.
521 * Please note it requires memory allocation, however it won't try to sleep.
522 */
523int __init bcma_bus_early_register(struct bcma_bus *bus)
524{
525	int err;
526	struct bcma_device *core;
527
528	/* Scan for devices (cores) */
529	err = bcma_bus_scan(bus);
530	if (err) {
531		bcma_err(bus, "Failed to scan bus: %d\n", err);
532		return -1;
533	}
534
535	/* Early init CC core */
536	core = bcma_find_core(bus, bcma_cc_core_id(bus));
537	if (core) {
538		bus->drv_cc.core = core;
539		bcma_core_chipcommon_early_init(&bus->drv_cc);
540	}
541
542	/* Early init MIPS core */
543	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
544	if (core) {
545		bus->drv_mips.core = core;
546		bcma_core_mips_early_init(&bus->drv_mips);
547	}
548
549	bcma_info(bus, "Early bus registered\n");
550
551	return 0;
552}
553
554#ifdef CONFIG_PM
555int bcma_bus_suspend(struct bcma_bus *bus)
556{
557	struct bcma_device *core;
558
559	list_for_each_entry(core, &bus->cores, list) {
560		struct device_driver *drv = core->dev.driver;
561		if (drv) {
562			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
563			if (adrv->suspend)
564				adrv->suspend(core);
565		}
566	}
567	return 0;
568}
569
570int bcma_bus_resume(struct bcma_bus *bus)
571{
572	struct bcma_device *core;
573
574	/* Init CC core */
575	if (bus->drv_cc.core) {
576		bus->drv_cc.setup_done = false;
577		bcma_core_chipcommon_init(&bus->drv_cc);
578	}
579
580	list_for_each_entry(core, &bus->cores, list) {
581		struct device_driver *drv = core->dev.driver;
582		if (drv) {
583			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
584			if (adrv->resume)
585				adrv->resume(core);
586		}
587	}
588
589	return 0;
590}
591#endif
592
593int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
594{
595	drv->drv.name = drv->name;
596	drv->drv.bus = &bcma_bus_type;
597	drv->drv.owner = owner;
598
599	return driver_register(&drv->drv);
600}
601EXPORT_SYMBOL_GPL(__bcma_driver_register);
602
603void bcma_driver_unregister(struct bcma_driver *drv)
604{
605	driver_unregister(&drv->drv);
606}
607EXPORT_SYMBOL_GPL(bcma_driver_unregister);
608
609static int bcma_bus_match(struct device *dev, struct device_driver *drv)
610{
611	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
612	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
613	const struct bcma_device_id *cid = &core->id;
614	const struct bcma_device_id *did;
615
616	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
617	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
618		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
619		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
620		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
621			return 1;
622	}
623	return 0;
624}
625
626static int bcma_device_probe(struct device *dev)
627{
628	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
629	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
630					       drv);
631	int err = 0;
632
633	get_device(dev);
634	if (adrv->probe)
635		err = adrv->probe(core);
636	if (err)
637		put_device(dev);
638
639	return err;
640}
641
642static int bcma_device_remove(struct device *dev)
643{
644	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
645	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
646					       drv);
647
648	if (adrv->remove)
649		adrv->remove(core);
650	put_device(dev);
651
652	return 0;
653}
654
655static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
656{
657	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
658
659	return add_uevent_var(env,
660			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
661			      core->id.manuf, core->id.id,
662			      core->id.rev, core->id.class);
663}
664
665static unsigned int bcma_bus_registered;
666
667/*
668 * If built-in, bus has to be registered early, before any driver calls
669 * bcma_driver_register.
670 * Otherwise registering driver would trigger BUG in driver_register.
671 */
672static int __init bcma_init_bus_register(void)
673{
674	int err;
675
676	if (bcma_bus_registered)
677		return 0;
678
679	err = bus_register(&bcma_bus_type);
680	if (!err)
681		bcma_bus_registered = 1;
682
683	return err;
684}
685#ifndef MODULE
686fs_initcall(bcma_init_bus_register);
687#endif
688
689/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
690static int __init bcma_modinit(void)
691{
692	int err;
693
694	err = bcma_init_bus_register();
695	if (err)
696		return err;
697
698	err = bcma_host_soc_register_driver();
699	if (err) {
700		pr_err("SoC host initialization failed\n");
701		err = 0;
702	}
703#ifdef CONFIG_BCMA_HOST_PCI
704	err = bcma_host_pci_init();
705	if (err) {
706		pr_err("PCI host initialization failed\n");
707		err = 0;
708	}
709#endif
710
711	return err;
712}
713module_init(bcma_modinit);
714
715static void __exit bcma_modexit(void)
716{
717#ifdef CONFIG_BCMA_HOST_PCI
718	bcma_host_pci_exit();
719#endif
720	bcma_host_soc_unregister_driver();
721	bus_unregister(&bcma_bus_type);
722}
723module_exit(bcma_modexit)
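
Both revisions also show the CONFIG_PM paths: bcma_bus_suspend() and bcma_bus_resume() walk the core list and, via container_of() on dev->driver, invoke optional per-driver hooks. A driver opts in simply by filling the suspend/resume members of struct bcma_driver; a hedged sketch, continuing the demo_* names assumed in the earlier example:

/* Optional PM hooks for the illustrative driver sketched earlier. */
static int demo_suspend(struct bcma_device *core)
{
	/* called from bcma_bus_suspend() for every bound core */
	return 0;
}

static int demo_resume(struct bcma_device *core)
{
	/*
	 * called from bcma_bus_resume() after the bus has re-run
	 * bcma_core_chipcommon_init() with setup_done cleared
	 */
	return 0;
}

/* wired up in the bcma_driver definition:
 *	.suspend	= demo_suspend,
 *	.resume		= demo_resume,
 */
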