v6.8
  1/*
  2 * Broadcom specific AMBA
  3 * Bus subsystem
  4 *
  5 * Licensed under the GNU/GPL. See COPYING for details.
  6 */
  7
  8#include "bcma_private.h"
  9#include <linux/module.h>
 10#include <linux/mmc/sdio_func.h>
 11#include <linux/platform_device.h>
 12#include <linux/pci.h>
 13#include <linux/bcma/bcma.h>
 14#include <linux/slab.h>
 15#include <linux/of_address.h>
 16#include <linux/of_irq.h>
 17#include <linux/of_device.h>
 18#include <linux/of_platform.h>
 19
 20MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 21MODULE_LICENSE("GPL");
 22
 23/* contains the number the next bus should get. */
 24static unsigned int bcma_bus_next_num;
 25
 26/* bcma_buses_mutex locks the bcma_bus_next_num */
 27static DEFINE_MUTEX(bcma_buses_mutex);
 28
 29static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 30static int bcma_device_probe(struct device *dev);
 31static void bcma_device_remove(struct device *dev);
 32static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
 33
 34static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
 35{
 36	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 37	return sprintf(buf, "0x%03X\n", core->id.manuf);
 38}
 39static DEVICE_ATTR_RO(manuf);
 40
 41static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
 42{
 43	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 44	return sprintf(buf, "0x%03X\n", core->id.id);
 45}
 46static DEVICE_ATTR_RO(id);
 47
 48static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
 49{
 50	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 51	return sprintf(buf, "0x%02X\n", core->id.rev);
 52}
 53static DEVICE_ATTR_RO(rev);
 54
 55static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
 56{
 57	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 58	return sprintf(buf, "0x%X\n", core->id.class);
 59}
 60static DEVICE_ATTR_RO(class);
 61
 62static struct attribute *bcma_device_attrs[] = {
 63	&dev_attr_manuf.attr,
 64	&dev_attr_id.attr,
 65	&dev_attr_rev.attr,
 66	&dev_attr_class.attr,
 67	NULL,
 68};
 69ATTRIBUTE_GROUPS(bcma_device);
 70
 71static struct bus_type bcma_bus_type = {
 72	.name		= "bcma",
 73	.match		= bcma_bus_match,
 74	.probe		= bcma_device_probe,
 75	.remove		= bcma_device_remove,
 76	.uevent		= bcma_device_uevent,
 77	.dev_groups	= bcma_device_groups,
 78};
 79
 80static u16 bcma_cc_core_id(struct bcma_bus *bus)
 81{
 82	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
 83		return BCMA_CORE_4706_CHIPCOMMON;
 84	return BCMA_CORE_CHIPCOMMON;
 85}
 86
 87struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
 88					u8 unit)
 89{
 90	struct bcma_device *core;
 91
 92	list_for_each_entry(core, &bus->cores, list) {
 93		if (core->id.id == coreid && core->core_unit == unit)
 94			return core;
 95	}
 96	return NULL;
 97}
 98EXPORT_SYMBOL_GPL(bcma_find_core_unit);
 99
100bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
101		     int timeout)
102{
103	unsigned long deadline = jiffies + timeout;
104	u32 val;
105
106	do {
107		val = bcma_read32(core, reg);
108		if ((val & mask) == value)
109			return true;
110		cpu_relax();
111		udelay(10);
112	} while (!time_after_eq(jiffies, deadline));
113
114	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
115
116	return false;
117}
118
119static void bcma_release_core_dev(struct device *dev)
120{
121	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
122	if (core->io_addr)
123		iounmap(core->io_addr);
124	if (core->io_wrap)
125		iounmap(core->io_wrap);
126	kfree(core);
127}
128
129static bool bcma_is_core_needed_early(u16 core_id)
130{
131	switch (core_id) {
132	case BCMA_CORE_NS_NAND:
133	case BCMA_CORE_NS_QSPI:
134		return true;
135	}
136
137	return false;
138}
139
140static struct device_node *bcma_of_find_child_device(struct device *parent,
141						     struct bcma_device *core)
142{
143	struct device_node *node;
144	int ret;
145
146	if (!parent->of_node)
147		return NULL;
148
149	for_each_child_of_node(parent->of_node, node) {
150		struct resource res;
151		ret = of_address_to_resource(node, 0, &res);
152		if (ret)
153			continue;
154		if (res.start == core->addr)
155			return node;
156	}
157	return NULL;
158}
159
160static int bcma_of_irq_parse(struct device *parent,
161			     struct bcma_device *core,
162			     struct of_phandle_args *out_irq, int num)
163{
164	__be32 laddr[1];
165	int rc;
166
167	if (core->dev.of_node) {
168		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
169		if (!rc)
170			return rc;
171	}
172
173	out_irq->np = parent->of_node;
174	out_irq->args_count = 1;
175	out_irq->args[0] = num;
176
177	laddr[0] = cpu_to_be32(core->addr);
178	return of_irq_parse_raw(laddr, out_irq);
179}
180
181static unsigned int bcma_of_get_irq(struct device *parent,
182				    struct bcma_device *core, int num)
183{
184	struct of_phandle_args out_irq;
185	int ret;
186
187	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
188		return 0;
189
190	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
191	if (ret) {
192		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
193			   ret);
194		return 0;
195	}
196
197	return irq_create_of_mapping(&out_irq);
198}
199
200static void bcma_of_fill_device(struct device *parent,
201				struct bcma_device *core)
202{
203	struct device_node *node;
204
205	node = bcma_of_find_child_device(parent, core);
206	if (node)
207		core->dev.of_node = node;
208
209	core->irq = bcma_of_get_irq(parent, core, 0);
210
211	of_dma_configure(&core->dev, node, false);
212}
213
214unsigned int bcma_core_irq(struct bcma_device *core, int num)
215{
216	struct bcma_bus *bus = core->bus;
217	unsigned int mips_irq;
218
219	switch (bus->hosttype) {
220	case BCMA_HOSTTYPE_PCI:
221		return bus->host_pci->irq;
222	case BCMA_HOSTTYPE_SOC:
223		if (bus->drv_mips.core && num == 0) {
224			mips_irq = bcma_core_mips_irq(core);
225			return mips_irq <= 4 ? mips_irq + 2 : 0;
226		}
227		if (bus->dev)
228			return bcma_of_get_irq(bus->dev, core, num);
229		return 0;
230	case BCMA_HOSTTYPE_SDIO:
231		return 0;
232	}
233
234	return 0;
235}
236EXPORT_SYMBOL(bcma_core_irq);
237
238void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
239{
240	device_initialize(&core->dev);
241	core->dev.release = bcma_release_core_dev;
242	core->dev.bus = &bcma_bus_type;
243	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
244	core->dev.parent = bus->dev;
245	if (bus->dev)
246		bcma_of_fill_device(bus->dev, core);
247
248	switch (bus->hosttype) {
249	case BCMA_HOSTTYPE_PCI:
250		core->dma_dev = bus->dev;
251		core->irq = bus->host_pci->irq;
252		break;
253	case BCMA_HOSTTYPE_SOC:
254		if (IS_ENABLED(CONFIG_OF) && bus->dev) {
255			core->dma_dev = bus->dev;
256		} else {
257			core->dev.dma_mask = &core->dev.coherent_dma_mask;
258			core->dma_dev = &core->dev;
259		}
260		break;
261	case BCMA_HOSTTYPE_SDIO:
262		break;
263	}
264}
265
266void bcma_init_bus(struct bcma_bus *bus)
267{
268	mutex_lock(&bcma_buses_mutex);
269	bus->num = bcma_bus_next_num++;
270	mutex_unlock(&bcma_buses_mutex);
271
272	INIT_LIST_HEAD(&bus->cores);
273	bus->nr_cores = 0;
274
275	bcma_detect_chip(bus);
276}
277
278static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
279{
280	int err;
281
282	err = device_add(&core->dev);
283	if (err) {
284		bcma_err(bus, "Could not register dev for core 0x%03X\n",
285			 core->id.id);
286		return;
287	}
288	core->dev_registered = true;
289}
290
291static int bcma_register_devices(struct bcma_bus *bus)
292{
293	struct bcma_device *core;
294	int err;
295
296	list_for_each_entry(core, &bus->cores, list) {
297		/* We support these cores ourselves */
298		switch (core->id.id) {
299		case BCMA_CORE_4706_CHIPCOMMON:
300		case BCMA_CORE_CHIPCOMMON:
301		case BCMA_CORE_NS_CHIPCOMMON_B:
302		case BCMA_CORE_PCI:
303		case BCMA_CORE_PCIE:
304		case BCMA_CORE_PCIE2:
305		case BCMA_CORE_MIPS_74K:
306		case BCMA_CORE_4706_MAC_GBIT_COMMON:
307			continue;
308		}
309
310		/* Early cores were already registered */
311		if (bcma_is_core_needed_early(core->id.id))
312			continue;
313
314		/* Only first GMAC core on BCM4706 is connected and working */
315		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
316		    core->core_unit > 0)
317			continue;
318
319		bcma_register_core(bus, core);
320	}
321
322#ifdef CONFIG_BCMA_PFLASH
323	if (bus->drv_cc.pflash.present) {
324		err = platform_device_register(&bcma_pflash_dev);
325		if (err)
326			bcma_err(bus, "Error registering parallel flash\n");
327	}
328#endif
329
330#ifdef CONFIG_BCMA_SFLASH
331	if (bus->drv_cc.sflash.present) {
332		err = platform_device_register(&bcma_sflash_dev);
333		if (err)
334			bcma_err(bus, "Error registering serial flash\n");
335	}
336#endif
337
338#ifdef CONFIG_BCMA_NFLASH
339	if (bus->drv_cc.nflash.present) {
340		err = platform_device_register(&bcma_nflash_dev);
341		if (err)
342			bcma_err(bus, "Error registering NAND flash\n");
343	}
344#endif
345	err = bcma_gpio_init(&bus->drv_cc);
346	if (err == -ENOTSUPP)
347		bcma_debug(bus, "GPIO driver not activated\n");
348	else if (err) {
349		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
350		return err;
351	}
352
353	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
354		err = bcma_chipco_watchdog_register(&bus->drv_cc);
355		if (err)
356			bcma_err(bus, "Error registering watchdog driver\n");
357	}
358
359	return 0;
360}
361
362void bcma_unregister_cores(struct bcma_bus *bus)
363{
364	struct bcma_device *core, *tmp;
365
366	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
367		if (!core->dev_registered)
368			continue;
369		list_del(&core->list);
370		device_unregister(&core->dev);
371	}
372	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
373		platform_device_unregister(bus->drv_cc.watchdog);
374
375	/* Now no one uses internally-handled cores, we can free them */
376	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
377		list_del(&core->list);
378		put_device(&core->dev);
379	}
380}
381
382int bcma_bus_register(struct bcma_bus *bus)
383{
384	int err;
385	struct bcma_device *core;
386
387	/* Scan for devices (cores) */
388	err = bcma_bus_scan(bus);
389	if (err) {
390		bcma_err(bus, "Failed to scan: %d\n", err);
391		return err;
392	}
393
394	/* Early init CC core */
395	core = bcma_find_core(bus, bcma_cc_core_id(bus));
396	if (core) {
397		bus->drv_cc.core = core;
398		bcma_core_chipcommon_early_init(&bus->drv_cc);
399	}
400
401	/* Early init PCIE core */
402	core = bcma_find_core(bus, BCMA_CORE_PCIE);
403	if (core) {
404		bus->drv_pci[0].core = core;
405		bcma_core_pci_early_init(&bus->drv_pci[0]);
406	}
407
408	if (bus->dev)
409		of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);
410
411	/* Cores providing flash access go before SPROM init */
412	list_for_each_entry(core, &bus->cores, list) {
413		if (bcma_is_core_needed_early(core->id.id))
414			bcma_register_core(bus, core);
415	}
416
417	/* Try to get SPROM */
418	err = bcma_sprom_get(bus);
419	if (err == -ENOENT) {
420		bcma_err(bus, "No SPROM available\n");
421	} else if (err)
422		bcma_err(bus, "Failed to get SPROM: %d\n", err);
423
424	/* Init CC core */
425	core = bcma_find_core(bus, bcma_cc_core_id(bus));
426	if (core) {
427		bus->drv_cc.core = core;
428		bcma_core_chipcommon_init(&bus->drv_cc);
429	}
430
431	/* Init CC core */
432	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
433	if (core) {
434		bus->drv_cc_b.core = core;
435		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
436	}
437
438	/* Init MIPS core */
439	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
440	if (core) {
441		bus->drv_mips.core = core;
442		bcma_core_mips_init(&bus->drv_mips);
443	}
444
445	/* Init PCIE core */
446	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
447	if (core) {
448		bus->drv_pci[0].core = core;
449		bcma_core_pci_init(&bus->drv_pci[0]);
450	}
451
452	/* Init PCIE core */
453	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
454	if (core) {
455		bus->drv_pci[1].core = core;
456		bcma_core_pci_init(&bus->drv_pci[1]);
457	}
458
459	/* Init PCIe Gen 2 core */
460	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
461	if (core) {
462		bus->drv_pcie2.core = core;
463		bcma_core_pcie2_init(&bus->drv_pcie2);
464	}
465
466	/* Init GBIT MAC COMMON core */
467	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
468	if (core) {
469		bus->drv_gmac_cmn.core = core;
470		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
471	}
472
473	/* Register found cores */
474	bcma_register_devices(bus);
475
476	bcma_info(bus, "Bus registered\n");
477
478	return 0;
479}
480
481void bcma_bus_unregister(struct bcma_bus *bus)
482{
483	int err;
484
485	err = bcma_gpio_unregister(&bus->drv_cc);
486	if (err == -EBUSY)
487		bcma_err(bus, "Some GPIOs are still in use.\n");
488	else if (err)
489		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
490
491	bcma_core_chipcommon_b_free(&bus->drv_cc_b);
492
493	bcma_unregister_cores(bus);
494}
495
496/*
497 * This is a special version of bus registration function designed for SoCs.
498 * It scans the bus and performs basic initialization of the main cores only.
499 * Please note that it requires memory allocation, but it won't try to sleep.
500 */
501int __init bcma_bus_early_register(struct bcma_bus *bus)
502{
503	int err;
504	struct bcma_device *core;
505
506	/* Scan for devices (cores) */
507	err = bcma_bus_scan(bus);
508	if (err) {
509		bcma_err(bus, "Failed to scan bus: %d\n", err);
510		return -1;
511	}
512
513	/* Early init CC core */
514	core = bcma_find_core(bus, bcma_cc_core_id(bus));
515	if (core) {
516		bus->drv_cc.core = core;
517		bcma_core_chipcommon_early_init(&bus->drv_cc);
518	}
519
520	/* Early init MIPS core */
521	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
522	if (core) {
523		bus->drv_mips.core = core;
524		bcma_core_mips_early_init(&bus->drv_mips);
525	}
526
527	bcma_info(bus, "Early bus registered\n");
528
529	return 0;
530}
531
532#ifdef CONFIG_PM
533int bcma_bus_suspend(struct bcma_bus *bus)
534{
535	struct bcma_device *core;
536
537	list_for_each_entry(core, &bus->cores, list) {
538		struct device_driver *drv = core->dev.driver;
539		if (drv) {
540			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
541			if (adrv->suspend)
542				adrv->suspend(core);
543		}
544	}
545	return 0;
546}
547
548int bcma_bus_resume(struct bcma_bus *bus)
549{
550	struct bcma_device *core;
551
552	/* Init CC core */
553	if (bus->drv_cc.core) {
554		bus->drv_cc.setup_done = false;
555		bcma_core_chipcommon_init(&bus->drv_cc);
556	}
557
558	list_for_each_entry(core, &bus->cores, list) {
559		struct device_driver *drv = core->dev.driver;
560		if (drv) {
561			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
562			if (adrv->resume)
563				adrv->resume(core);
564		}
565	}
566
567	return 0;
568}
569#endif
570
571int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
572{
573	drv->drv.name = drv->name;
574	drv->drv.bus = &bcma_bus_type;
575	drv->drv.owner = owner;
576
577	return driver_register(&drv->drv);
578}
579EXPORT_SYMBOL_GPL(__bcma_driver_register);
580
581void bcma_driver_unregister(struct bcma_driver *drv)
582{
583	driver_unregister(&drv->drv);
584}
585EXPORT_SYMBOL_GPL(bcma_driver_unregister);
586
587static int bcma_bus_match(struct device *dev, struct device_driver *drv)
588{
589	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
590	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
591	const struct bcma_device_id *cid = &core->id;
592	const struct bcma_device_id *did;
593
594	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
595	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
596		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
597		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
598		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
599			return 1;
600	}
601	return 0;
602}
603
604static int bcma_device_probe(struct device *dev)
605{
606	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
607	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
608					       drv);
609	int err = 0;
610
611	get_device(dev);
612	if (adrv->probe)
613		err = adrv->probe(core);
614	if (err)
615		put_device(dev);
616
617	return err;
618}
619
620static void bcma_device_remove(struct device *dev)
621{
622	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
623	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
624					       drv);
625
626	if (adrv->remove)
627		adrv->remove(core);
628	put_device(dev);
629}
630
631static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
632{
633	const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev);
634
635	return add_uevent_var(env,
636			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
637			      core->id.manuf, core->id.id,
638			      core->id.rev, core->id.class);
639}
640
641static unsigned int bcma_bus_registered;
642
643/*
644 * If built-in, the bus has to be registered early, before any driver calls
645 * bcma_driver_register().
646 * Otherwise registering a driver would trigger a BUG in driver_register().
647 */
648static int __init bcma_init_bus_register(void)
649{
650	int err;
651
652	if (bcma_bus_registered)
653		return 0;
654
655	err = bus_register(&bcma_bus_type);
656	if (!err)
657		bcma_bus_registered = 1;
658
659	return err;
660}
661#ifndef MODULE
662fs_initcall(bcma_init_bus_register);
663#endif
664
665/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
666static int __init bcma_modinit(void)
667{
668	int err;
669
670	err = bcma_init_bus_register();
671	if (err)
672		return err;
673
674	err = bcma_host_soc_register_driver();
675	if (err) {
676		pr_err("SoC host initialization failed\n");
677		err = 0;
678	}
679#ifdef CONFIG_BCMA_HOST_PCI
680	err = bcma_host_pci_init();
681	if (err) {
682		pr_err("PCI host initialization failed\n");
683		err = 0;
684	}
685#endif
686
687	return err;
688}
689module_init(bcma_modinit);
690
691static void __exit bcma_modexit(void)
692{
693#ifdef CONFIG_BCMA_HOST_PCI
694	bcma_host_pci_exit();
695#endif
696	bcma_host_soc_unregister_driver();
697	bus_unregister(&bcma_bus_type);
698}
699module_exit(bcma_modexit)
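
For orientation, here is a minimal, hypothetical sketch of a client driver for this bus, showing how the callbacks above are exercised: bcma_bus_match() walks the driver's id_table, bcma_device_probe() and bcma_device_remove() call the probe/remove hooks, and bcma_device_uevent() emits the MODALIAS string that MODULE_DEVICE_TABLE(bcma, ...) aliases are matched against. The example_* names are invented for illustration; struct bcma_driver, BCMA_CORE(), the BCMA_* constants and the bcma_driver_register() wrapper come from include/linux/bcma/bcma.h.

#include <linux/module.h>
#include <linux/bcma/bcma.h>

/* Hypothetical id table: bind to the Broadcom 802.11 core, any rev/class */
static const struct bcma_device_id example_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, example_bcma_tbl);

static int example_probe(struct bcma_device *core)
{
	/* core->id was filled in by bcma_bus_scan(); bcma_core_irq() (defined
	 * above) resolves the host IRQ for this core. */
	dev_info(&core->dev, "core 0x%03X rev 0x%02X, IRQ %u\n",
		 core->id.id, core->id.rev, bcma_core_irq(core, 0));
	return 0;
}

static void example_remove(struct bcma_device *core)
{
	dev_info(&core->dev, "core removed\n");
}

static struct bcma_driver example_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= example_bcma_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	/* bcma_driver_register() wraps __bcma_driver_register(drv, THIS_MODULE) */
	return bcma_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	bcma_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
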
v4.6
  1/*
  2 * Broadcom specific AMBA
  3 * Bus subsystem
  4 *
  5 * Licensed under the GNU/GPL. See COPYING for details.
  6 */
  7
  8#include "bcma_private.h"
  9#include <linux/module.h>
 10#include <linux/mmc/sdio_func.h>
 11#include <linux/platform_device.h>
 12#include <linux/pci.h>
 13#include <linux/bcma/bcma.h>
 14#include <linux/slab.h>
 15#include <linux/of_address.h>
 16#include <linux/of_irq.h>
 17#include <linux/of_platform.h>
 18
 19MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 20MODULE_LICENSE("GPL");
 21
 22/* contains the number the next bus should get. */
 23static unsigned int bcma_bus_next_num = 0;
 24
 25/* bcma_buses_mutex locks the bcma_bus_next_num */
 26static DEFINE_MUTEX(bcma_buses_mutex);
 27
 28static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 29static int bcma_device_probe(struct device *dev);
 30static int bcma_device_remove(struct device *dev);
 31static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 32
 33static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
 34{
 35	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 36	return sprintf(buf, "0x%03X\n", core->id.manuf);
 37}
 38static DEVICE_ATTR_RO(manuf);
 39
 40static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
 41{
 42	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 43	return sprintf(buf, "0x%03X\n", core->id.id);
 44}
 45static DEVICE_ATTR_RO(id);
 46
 47static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
 48{
 49	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 50	return sprintf(buf, "0x%02X\n", core->id.rev);
 51}
 52static DEVICE_ATTR_RO(rev);
 53
 54static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
 55{
 56	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 57	return sprintf(buf, "0x%X\n", core->id.class);
 58}
 59static DEVICE_ATTR_RO(class);
 60
 61static struct attribute *bcma_device_attrs[] = {
 62	&dev_attr_manuf.attr,
 63	&dev_attr_id.attr,
 64	&dev_attr_rev.attr,
 65	&dev_attr_class.attr,
 66	NULL,
 67};
 68ATTRIBUTE_GROUPS(bcma_device);
 69
 70static struct bus_type bcma_bus_type = {
 71	.name		= "bcma",
 72	.match		= bcma_bus_match,
 73	.probe		= bcma_device_probe,
 74	.remove		= bcma_device_remove,
 75	.uevent		= bcma_device_uevent,
 76	.dev_groups	= bcma_device_groups,
 77};
 78
 79static u16 bcma_cc_core_id(struct bcma_bus *bus)
 80{
 81	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
 82		return BCMA_CORE_4706_CHIPCOMMON;
 83	return BCMA_CORE_CHIPCOMMON;
 84}
 85
 86struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
 87					u8 unit)
 88{
 89	struct bcma_device *core;
 90
 91	list_for_each_entry(core, &bus->cores, list) {
 92		if (core->id.id == coreid && core->core_unit == unit)
 93			return core;
 94	}
 95	return NULL;
 96}
 97EXPORT_SYMBOL_GPL(bcma_find_core_unit);
 98
 99bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
100		     int timeout)
101{
102	unsigned long deadline = jiffies + timeout;
103	u32 val;
104
105	do {
106		val = bcma_read32(core, reg);
107		if ((val & mask) == value)
108			return true;
109		cpu_relax();
110		udelay(10);
111	} while (!time_after_eq(jiffies, deadline));
112
113	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
114
115	return false;
116}
117
118static void bcma_release_core_dev(struct device *dev)
119{
120	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
121	if (core->io_addr)
122		iounmap(core->io_addr);
123	if (core->io_wrap)
124		iounmap(core->io_wrap);
125	kfree(core);
126}
127
128static bool bcma_is_core_needed_early(u16 core_id)
129{
130	switch (core_id) {
131	case BCMA_CORE_NS_NAND:
132	case BCMA_CORE_NS_QSPI:
133		return true;
134	}
135
136	return false;
137}
138
139static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
140						     struct bcma_device *core)
141{
142	struct device_node *node;
143	u64 size;
144	const __be32 *reg;
145
146	if (!parent || !parent->dev.of_node)
147		return NULL;
148
149	for_each_child_of_node(parent->dev.of_node, node) {
150		reg = of_get_address(node, 0, &size, NULL);
151		if (!reg)
152			continue;
153		if (of_translate_address(node, reg) == core->addr)
154			return node;
155	}
156	return NULL;
157}
158
159static int bcma_of_irq_parse(struct platform_device *parent,
160			     struct bcma_device *core,
161			     struct of_phandle_args *out_irq, int num)
162{
163	__be32 laddr[1];
164	int rc;
165
166	if (core->dev.of_node) {
167		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
168		if (!rc)
169			return rc;
170	}
171
172	out_irq->np = parent->dev.of_node;
173	out_irq->args_count = 1;
174	out_irq->args[0] = num;
175
176	laddr[0] = cpu_to_be32(core->addr);
177	return of_irq_parse_raw(laddr, out_irq);
178}
179
180static unsigned int bcma_of_get_irq(struct platform_device *parent,
181				    struct bcma_device *core, int num)
182{
183	struct of_phandle_args out_irq;
184	int ret;
185
186	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
187		return 0;
188
189	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
190	if (ret) {
191		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
192			   ret);
193		return 0;
194	}
195
196	return irq_create_of_mapping(&out_irq);
197}
198
199static void bcma_of_fill_device(struct platform_device *parent,
200				struct bcma_device *core)
201{
202	struct device_node *node;
203
204	if (!IS_ENABLED(CONFIG_OF_IRQ))
205		return;
206
207	node = bcma_of_find_child_device(parent, core);
208	if (node)
209		core->dev.of_node = node;
210
211	core->irq = bcma_of_get_irq(parent, core, 0);
212}
213
214unsigned int bcma_core_irq(struct bcma_device *core, int num)
215{
216	struct bcma_bus *bus = core->bus;
217	unsigned int mips_irq;
218
219	switch (bus->hosttype) {
220	case BCMA_HOSTTYPE_PCI:
221		return bus->host_pci->irq;
222	case BCMA_HOSTTYPE_SOC:
223		if (bus->drv_mips.core && num == 0) {
224			mips_irq = bcma_core_mips_irq(core);
225			return mips_irq <= 4 ? mips_irq + 2 : 0;
226		}
227		if (bus->host_pdev)
228			return bcma_of_get_irq(bus->host_pdev, core, num);
229		return 0;
230	case BCMA_HOSTTYPE_SDIO:
231		return 0;
232	}
233
234	return 0;
235}
236EXPORT_SYMBOL(bcma_core_irq);
237
238void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
239{
240	core->dev.release = bcma_release_core_dev;
241	core->dev.bus = &bcma_bus_type;
242	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
243
244	switch (bus->hosttype) {
245	case BCMA_HOSTTYPE_PCI:
246		core->dev.parent = &bus->host_pci->dev;
247		core->dma_dev = &bus->host_pci->dev;
248		core->irq = bus->host_pci->irq;
249		break;
250	case BCMA_HOSTTYPE_SOC:
251		core->dev.dma_mask = &core->dev.coherent_dma_mask;
252		if (bus->host_pdev) {
253			core->dma_dev = &bus->host_pdev->dev;
254			core->dev.parent = &bus->host_pdev->dev;
255			bcma_of_fill_device(bus->host_pdev, core);
256		} else {
257			core->dma_dev = &core->dev;
258		}
259		break;
260	case BCMA_HOSTTYPE_SDIO:
261		break;
262	}
263}
264
265struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
266{
267	switch (bus->hosttype) {
268	case BCMA_HOSTTYPE_PCI:
269		if (bus->host_pci)
270			return &bus->host_pci->dev;
271		else
272			return NULL;
273	case BCMA_HOSTTYPE_SOC:
274		if (bus->host_pdev)
275			return &bus->host_pdev->dev;
276		else
277			return NULL;
278	case BCMA_HOSTTYPE_SDIO:
279		if (bus->host_sdio)
280			return &bus->host_sdio->dev;
281		else
282			return NULL;
283	}
284	return NULL;
285}
286
287void bcma_init_bus(struct bcma_bus *bus)
288{
289	mutex_lock(&bcma_buses_mutex);
290	bus->num = bcma_bus_next_num++;
291	mutex_unlock(&bcma_buses_mutex);
292
293	INIT_LIST_HEAD(&bus->cores);
294	bus->nr_cores = 0;
295
296	bcma_detect_chip(bus);
297}
298
299static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
300{
301	int err;
302
303	err = device_register(&core->dev);
304	if (err) {
305		bcma_err(bus, "Could not register dev for core 0x%03X\n",
306			 core->id.id);
307		put_device(&core->dev);
308		return;
309	}
310	core->dev_registered = true;
311}
312
313static int bcma_register_devices(struct bcma_bus *bus)
314{
315	struct bcma_device *core;
316	int err;
317
318	list_for_each_entry(core, &bus->cores, list) {
319		/* We support these cores ourselves */
320		switch (core->id.id) {
321		case BCMA_CORE_4706_CHIPCOMMON:
322		case BCMA_CORE_CHIPCOMMON:
323		case BCMA_CORE_NS_CHIPCOMMON_B:
324		case BCMA_CORE_PCI:
325		case BCMA_CORE_PCIE:
326		case BCMA_CORE_PCIE2:
327		case BCMA_CORE_MIPS_74K:
328		case BCMA_CORE_4706_MAC_GBIT_COMMON:
329			continue;
330		}
331
332		/* Early cores were already registered */
333		if (bcma_is_core_needed_early(core->id.id))
334			continue;
335
336		/* Only first GMAC core on BCM4706 is connected and working */
337		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
338		    core->core_unit > 0)
339			continue;
340
341		bcma_register_core(bus, core);
342	}
343
344#ifdef CONFIG_BCMA_PFLASH
345	if (bus->drv_cc.pflash.present) {
346		err = platform_device_register(&bcma_pflash_dev);
347		if (err)
348			bcma_err(bus, "Error registering parallel flash\n");
349	}
350#endif
351
352#ifdef CONFIG_BCMA_SFLASH
353	if (bus->drv_cc.sflash.present) {
354		err = platform_device_register(&bcma_sflash_dev);
355		if (err)
356			bcma_err(bus, "Error registering serial flash\n");
357	}
358#endif
359
360#ifdef CONFIG_BCMA_NFLASH
361	if (bus->drv_cc.nflash.present) {
362		err = platform_device_register(&bcma_nflash_dev);
363		if (err)
364			bcma_err(bus, "Error registering NAND flash\n");
365	}
366#endif
367	err = bcma_gpio_init(&bus->drv_cc);
368	if (err == -ENOTSUPP)
369		bcma_debug(bus, "GPIO driver not activated\n");
370	else if (err)
371		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
372
373	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
374		err = bcma_chipco_watchdog_register(&bus->drv_cc);
375		if (err)
376			bcma_err(bus, "Error registering watchdog driver\n");
377	}
378
379	return 0;
380}
381
382void bcma_unregister_cores(struct bcma_bus *bus)
383{
384	struct bcma_device *core, *tmp;
385
386	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
387		if (!core->dev_registered)
388			continue;
389		list_del(&core->list);
390		device_unregister(&core->dev);
391	}
392	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
393		platform_device_unregister(bus->drv_cc.watchdog);
394
395	/* Now no one uses internally-handled cores, we can free them */
396	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
397		list_del(&core->list);
398		kfree(core);
399	}
400}
401
402int bcma_bus_register(struct bcma_bus *bus)
403{
404	int err;
405	struct bcma_device *core;
406	struct device *dev;
407
408	/* Scan for devices (cores) */
409	err = bcma_bus_scan(bus);
410	if (err) {
411		bcma_err(bus, "Failed to scan: %d\n", err);
412		return err;
413	}
414
415	/* Early init CC core */
416	core = bcma_find_core(bus, bcma_cc_core_id(bus));
417	if (core) {
418		bus->drv_cc.core = core;
419		bcma_core_chipcommon_early_init(&bus->drv_cc);
420	}
421
422	/* Early init PCIE core */
423	core = bcma_find_core(bus, BCMA_CORE_PCIE);
424	if (core) {
425		bus->drv_pci[0].core = core;
426		bcma_core_pci_early_init(&bus->drv_pci[0]);
427	}
428
429	dev = bcma_bus_get_host_dev(bus);
430	if (dev) {
431		of_platform_default_populate(dev->of_node, NULL, dev);
432	}
433
434	/* Cores providing flash access go before SPROM init */
435	list_for_each_entry(core, &bus->cores, list) {
436		if (bcma_is_core_needed_early(core->id.id))
437			bcma_register_core(bus, core);
438	}
439
440	/* Try to get SPROM */
441	err = bcma_sprom_get(bus);
442	if (err == -ENOENT) {
443		bcma_err(bus, "No SPROM available\n");
444	} else if (err)
445		bcma_err(bus, "Failed to get SPROM: %d\n", err);
446
447	/* Init CC core */
448	core = bcma_find_core(bus, bcma_cc_core_id(bus));
449	if (core) {
450		bus->drv_cc.core = core;
451		bcma_core_chipcommon_init(&bus->drv_cc);
452	}
453
454	/* Init CC core */
455	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
456	if (core) {
457		bus->drv_cc_b.core = core;
458		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
459	}
460
461	/* Init MIPS core */
462	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
463	if (core) {
464		bus->drv_mips.core = core;
465		bcma_core_mips_init(&bus->drv_mips);
466	}
467
468	/* Init PCIE core */
469	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
470	if (core) {
471		bus->drv_pci[0].core = core;
472		bcma_core_pci_init(&bus->drv_pci[0]);
473	}
474
475	/* Init PCIE core */
476	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
477	if (core) {
478		bus->drv_pci[1].core = core;
479		bcma_core_pci_init(&bus->drv_pci[1]);
480	}
481
482	/* Init PCIe Gen 2 core */
483	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
484	if (core) {
485		bus->drv_pcie2.core = core;
486		bcma_core_pcie2_init(&bus->drv_pcie2);
487	}
488
489	/* Init GBIT MAC COMMON core */
490	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
491	if (core) {
492		bus->drv_gmac_cmn.core = core;
493		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
494	}
495
496	/* Register found cores */
497	bcma_register_devices(bus);
498
499	bcma_info(bus, "Bus registered\n");
500
501	return 0;
502}
503
504void bcma_bus_unregister(struct bcma_bus *bus)
505{
506	int err;
507
508	err = bcma_gpio_unregister(&bus->drv_cc);
509	if (err == -EBUSY)
510		bcma_err(bus, "Some GPIOs are still in use.\n");
511	else if (err)
512		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
513
514	bcma_core_chipcommon_b_free(&bus->drv_cc_b);
515
516	bcma_unregister_cores(bus);
517}
518
519/*
520 * This is a special version of bus registration function designed for SoCs.
521 * It scans the bus and performs basic initialization of the main cores only.
522 * Please note that it requires memory allocation, but it won't try to sleep.
523 */
524int __init bcma_bus_early_register(struct bcma_bus *bus)
525{
526	int err;
527	struct bcma_device *core;
528
529	/* Scan for devices (cores) */
530	err = bcma_bus_scan(bus);
531	if (err) {
532		bcma_err(bus, "Failed to scan bus: %d\n", err);
533		return -1;
534	}
535
536	/* Early init CC core */
537	core = bcma_find_core(bus, bcma_cc_core_id(bus));
538	if (core) {
539		bus->drv_cc.core = core;
540		bcma_core_chipcommon_early_init(&bus->drv_cc);
541	}
542
543	/* Early init MIPS core */
544	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
545	if (core) {
546		bus->drv_mips.core = core;
547		bcma_core_mips_early_init(&bus->drv_mips);
548	}
549
550	bcma_info(bus, "Early bus registered\n");
551
552	return 0;
553}
554
555#ifdef CONFIG_PM
556int bcma_bus_suspend(struct bcma_bus *bus)
557{
558	struct bcma_device *core;
559
560	list_for_each_entry(core, &bus->cores, list) {
561		struct device_driver *drv = core->dev.driver;
562		if (drv) {
563			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
564			if (adrv->suspend)
565				adrv->suspend(core);
566		}
567	}
568	return 0;
569}
570
571int bcma_bus_resume(struct bcma_bus *bus)
572{
573	struct bcma_device *core;
574
575	/* Init CC core */
576	if (bus->drv_cc.core) {
577		bus->drv_cc.setup_done = false;
578		bcma_core_chipcommon_init(&bus->drv_cc);
579	}
580
581	list_for_each_entry(core, &bus->cores, list) {
582		struct device_driver *drv = core->dev.driver;
583		if (drv) {
584			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
585			if (adrv->resume)
586				adrv->resume(core);
587		}
588	}
589
590	return 0;
591}
592#endif
593
594int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
595{
596	drv->drv.name = drv->name;
597	drv->drv.bus = &bcma_bus_type;
598	drv->drv.owner = owner;
599
600	return driver_register(&drv->drv);
601}
602EXPORT_SYMBOL_GPL(__bcma_driver_register);
603
604void bcma_driver_unregister(struct bcma_driver *drv)
605{
606	driver_unregister(&drv->drv);
607}
608EXPORT_SYMBOL_GPL(bcma_driver_unregister);
609
610static int bcma_bus_match(struct device *dev, struct device_driver *drv)
611{
612	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
613	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
614	const struct bcma_device_id *cid = &core->id;
615	const struct bcma_device_id *did;
616
617	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
618	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
619		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
620		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
621		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
622			return 1;
623	}
624	return 0;
625}
626
627static int bcma_device_probe(struct device *dev)
628{
629	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
630	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
631					       drv);
632	int err = 0;
633
634	if (adrv->probe)
635		err = adrv->probe(core);
636
637	return err;
638}
639
640static int bcma_device_remove(struct device *dev)
641{
642	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
643	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
644					       drv);
645
646	if (adrv->remove)
647		adrv->remove(core);
648
649	return 0;
650}
651
652static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
653{
654	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
655
656	return add_uevent_var(env,
657			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
658			      core->id.manuf, core->id.id,
659			      core->id.rev, core->id.class);
660}
661
662static unsigned int bcma_bus_registered;
663
664/*
665 * If built-in, the bus has to be registered early, before any driver calls
666 * bcma_driver_register().
667 * Otherwise registering a driver would trigger a BUG in driver_register().
668 */
669static int __init bcma_init_bus_register(void)
670{
671	int err;
672
673	if (bcma_bus_registered)
674		return 0;
675
676	err = bus_register(&bcma_bus_type);
677	if (!err)
678		bcma_bus_registered = 1;
679
680	return err;
681}
682#ifndef MODULE
683fs_initcall(bcma_init_bus_register);
684#endif
685
686/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
687static int __init bcma_modinit(void)
688{
689	int err;
690
691	err = bcma_init_bus_register();
692	if (err)
693		return err;
694
695	err = bcma_host_soc_register_driver();
696	if (err) {
697		pr_err("SoC host initialization failed\n");
698		err = 0;
699	}
700#ifdef CONFIG_BCMA_HOST_PCI
701	err = bcma_host_pci_init();
702	if (err) {
703		pr_err("PCI host initialization failed\n");
704		err = 0;
705	}
706#endif
707
708	return err;
709}
710module_init(bcma_modinit);
711
712static void __exit bcma_modexit(void)
713{
714#ifdef CONFIG_BCMA_HOST_PCI
715	bcma_host_pci_exit();
716#endif
717	bcma_host_soc_unregister_driver();
718	bus_unregister(&bcma_bus_type);
719}
720module_exit(bcma_modexit)
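
The CONFIG_PM helpers above, bcma_bus_suspend() and bcma_bus_resume(), re-run the ChipCommon setup on resume and otherwise just forward to the optional suspend/resume hooks of struct bcma_driver (the logic is the same in both versions shown here). A client driver that has to survive a host suspend therefore only needs to provide those two callbacks. Below is a hypothetical continuation of the example_* sketch placed after the v6.8 listing (it replaces that sketch's struct bcma_driver definition), assuming the int-returning suspend/resume prototypes declared in include/linux/bcma/bcma.h:

static int example_suspend(struct bcma_device *core)
{
	/* Called from bcma_bus_suspend() for every bound core */
	dev_dbg(&core->dev, "suspending\n");
	return 0;
}

static int example_resume(struct bcma_device *core)
{
	/* Called from bcma_bus_resume() after ChipCommon has been re-initialized */
	dev_dbg(&core->dev, "resuming\n");
	return 0;
}

static struct bcma_driver example_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= example_bcma_tbl,	/* as in the earlier sketch */
	.probe		= example_probe,
	.remove		= example_remove,
	.suspend	= example_suspend,	/* optional PM hooks */
	.resume		= example_resume,
};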