Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.13.7.
   1/*
   2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
   3 *		http://www.samsung.com
   4 *
   5 * Common Codes for EXYNOS
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/interrupt.h>
  14#include <linux/irq.h>
  15#include <linux/io.h>
  16#include <linux/device.h>
  17#include <linux/gpio.h>
  18#include <linux/sched.h>
  19#include <linux/serial_core.h>
  20#include <linux/of.h>
  21#include <linux/of_irq.h>
  22#include <linux/export.h>
  23#include <linux/irqdomain.h>
  24#include <linux/of_address.h>
  25
  26#include <asm/proc-fns.h>
  27#include <asm/exception.h>
  28#include <asm/hardware/cache-l2x0.h>
  29#include <asm/hardware/gic.h>
  30#include <asm/mach/map.h>
  31#include <asm/mach/irq.h>
  32#include <asm/cacheflush.h>
  33
  34#include <mach/regs-irq.h>
  35#include <mach/regs-pmu.h>
  36#include <mach/regs-gpio.h>
  37#include <mach/pmu.h>
  38
  39#include <plat/cpu.h>
  40#include <plat/clock.h>
  41#include <plat/devs.h>
  42#include <plat/pm.h>
  43#include <plat/sdhci.h>
  44#include <plat/gpio-cfg.h>
  45#include <plat/adc-core.h>
  46#include <plat/fb-core.h>
  47#include <plat/fimc-core.h>
  48#include <plat/iic-core.h>
  49#include <plat/tv-core.h>
  50#include <plat/regs-serial.h>
  51
  52#include "common.h"
/* PL310 auxiliary-control value/mask programmed in exynos4_l2x0_cache_init() */
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

/* Human-readable SoC names reported via the cpu_ids table below */
static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";

/* Forward declarations for the per-SoC hooks wired into cpu_ids */
static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);
  67
/*
 * SoC identification table: matched against the chip ID read in
 * exynos_init_io() (s3c_init_cpu) to select the per-SoC map_io/clock/
 * uart/init hooks.  All EXYNOS4 variants share the same hooks and differ
 * only in idcode and reported name.
 */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	}, {
		.idcode		= EXYNOS5250_SOC_ID,
		.idmask		= EXYNOS5_SOC_MASK,
		.map_io		= exynos5_map_io,
		.init_clocks	= exynos5_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos5250,
	},
};
 103
/* Initial IO mappings */

/*
 * Minimal mapping needed before the SoC type is known: just the CHIPID
 * block, so exynos_init_io() can read the chip ID and pick a cpu_ids entry.
 */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
 114
/* Static device mappings common to all EXYNOS4 SoCs (installed by
 * exynos4_map_io()): system controller, timers, interrupt hardware,
 * clock/power blocks, UARTs, DMC and USB PHY. */
static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC1),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
 198
/* SYSRAM mapping for EXYNOS4210 rev0, which has it at a different
 * physical address than later revisions (see exynos4_map_io()) */
static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
 207
/* SYSRAM mapping for all other EXYNOS4 chips/revisions */
static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
 216
/* Static device mappings for EXYNOS5250 (installed by exynos5_map_io()) */
static struct map_desc exynos5_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSRAM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_CMU),
		/* CMU block is 144KiB, not a power-of-two size */
		.length		= 144 * SZ_1K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
 280
/* Machine restart hook for EXYNOS4: trigger a software reset via the PMU */
void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}
 285
/* Machine restart hook for EXYNOS5: trigger a software reset via the PMU */
void exynos5_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, EXYNOS_SWRESET);
}
 290
/* Late machine init: run the PM late initcall work (see plat/pm.h) */
void __init exynos_init_late(void)
{
	exynos_pm_late_initcall();
}
 295
/*
 * exynos_map_io
 *
 * register the standard cpu IO areas
 */

/*
 * Map the minimal CHIPID window (plus any machine-specific descriptors),
 * read the chip ID to detect the SoC, then dispatch to the matching
 * cpu_ids entry, whose ->map_io installs the full per-SoC iotable.
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
 314
/*
 * EXYNOS4 ->map_io hook: install the static iotable (picking the SYSRAM
 * variant by chip revision), then rename the platform devices so they bind
 * to the EXYNOS4 drivers.
 */
static void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	/* 4210 rev0 has SYSRAM at a different physical address */
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}
 350
/*
 * EXYNOS5 ->map_io hook: install the static iotable, patch the i2c0
 * platform-device resources to the EXYNOS5 addresses, and rename devices
 * for driver matching.
 */
static void __init exynos5_map_io(void)
{
	iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));

	/* i2c0 lives at a different address/irq on EXYNOS5 */
	s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
	s3c_device_i2c0.resource[0].end   = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
	s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
	s3c_device_i2c0.resource[1].end   = EXYNOS5_IRQ_IIC;

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");
}
 370
/*
 * EXYNOS4 ->init_clocks hook: register base/common clocks, then the
 * 4210- or 4212/4412-specific clocks, and finally compute rates.
 * @xtal: crystal oscillator frequency in Hz.
 */
static void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}
 386
/*
 * EXYNOS5 ->init_clocks hook: register base, common and EXYNOS5 clocks,
 * then compute rates.
 * @xtal: crystal oscillator frequency in Hz.
 */
static void __init exynos5_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	exynos5_register_clocks();
	exynos5_setup_clocks();
}
 397
/* Register offsets within a combiner group's register block */
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

/* Serializes reads of the combiner status register in the cascade handler */
static DEFINE_SPINLOCK(irq_controller_lock);

/*
 * Per-combiner state.
 * @irq_offset: first linux irq handled by this combiner (set from the
 *              irq domain mapping in combiner_init_one())
 * @irq_mask:   this combiner's 8 status bits within the shared 32-bit
 *              group register (combiners are grouped four to a register)
 * @base:       register base of the group this combiner belongs to
 */
struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
 412
 413static inline void __iomem *combiner_base(struct irq_data *data)
 414{
 415	struct combiner_chip_data *combiner_data =
 416		irq_data_get_irq_chip_data(data);
 417
 418	return combiner_data->base;
 419}
 420
 421static void combiner_mask_irq(struct irq_data *data)
 422{
 423	u32 mask = 1 << (data->hwirq % 32);
 424
 425	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
 426}
 427
 428static void combiner_unmask_irq(struct irq_data *data)
 429{
 430	u32 mask = 1 << (data->hwirq % 32);
 431
 432	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 433}
 434
/*
 * Chained handler for the parent (GIC) interrupt a combiner cascades into.
 *
 * Reads this combiner's bits from the shared group status register and
 * dispatches the lowest pending source as a linux irq.  Only one source
 * is handled per invocation; presumably the parent re-fires while sources
 * remain pending — TODO confirm against the combiner hardware docs.
 * Runs inside chained_irq_enter/exit so the parent irq is acked/eoi'd
 * correctly around the demux.
 */
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	/* keep only the 8 bits belonging to this combiner */
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	/* translate bit position into the combiner's linux irq range */
	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
 463
/* irq_chip for interrupts demuxed from the combiner (mask/unmask only;
 * ack/eoi is handled by the parent GIC via the chained handler) */
static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};
 469
 470static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
 471{
 472	unsigned int max_nr;
 473
 474	if (soc_is_exynos5250())
 475		max_nr = EXYNOS5_MAX_COMBINER_NR;
 476	else
 477		max_nr = EXYNOS4_MAX_COMBINER_NR;
 478
 479	if (combiner_nr >= max_nr)
 480		BUG();
 481	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
 482		BUG();
 483	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
 484}
 485
/*
 * Initialize one combiner's state: record its group register @base, its
 * first linux irq (via the irq domain) and its 8-bit slice of the shared
 * status/enable registers, then mask all of its sources.
 */
static void __init combiner_init_one(unsigned int combiner_nr,
				     void __iomem *base)
{
	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
	/* combiner N owns bits 8*(N%4) .. 8*(N%4)+7 of its group register */
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */
	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);
}
 498
#ifdef CONFIG_OF
/*
 * irq_domain ->xlate: translate a 2-cell DT interrupt specifier
 * <combiner-nr source-bit> into a linear hwirq.  Trigger type is not
 * encoded in the binding, so *out_type is always 0.
 */
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}
#else
/* Without CONFIG_OF there is no DT to translate from; always fail */
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	return -EINVAL;
}
#endif
 527
/*
 * irq_domain ->map: set up one linux irq for combiner hwirq @hw.
 * hw >> 3 selects the owning combiner (8 sources per combiner).
 */
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
 537
 538static struct irq_domain_ops combiner_irq_domain_ops = {
 539	.xlate	= combiner_irq_domain_xlate,
 540	.map	= combiner_irq_domain_map,
 541};
 542
 543void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
 544{
 545	int i, irq, irq_base;
 546	unsigned int max_nr, nr_irq;
 547
 548	if (np) {
 549		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
 550			pr_warning("%s: number of combiners not specified, "
 551				"setting default as %d.\n",
 552				__func__, EXYNOS4_MAX_COMBINER_NR);
 553			max_nr = EXYNOS4_MAX_COMBINER_NR;
 554		}
 555	} else {
 556		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
 557						EXYNOS4_MAX_COMBINER_NR;
 558	}
 559	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
 560
 561	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
 562	if (IS_ERR_VALUE(irq_base)) {
 563		irq_base = COMBINER_IRQ(0, 0);
 564		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
 565	}
 566
 567	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
 568				&combiner_irq_domain_ops, &combiner_data);
 569	if (WARN_ON(!combiner_irq_domain)) {
 570		pr_warning("%s: irq domain init failed\n", __func__);
 571		return;
 572	}
 573
 574	for (i = 0; i < max_nr; i++) {
 575		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
 576		irq = IRQ_SPI(i);
 577#ifdef CONFIG_OF
 578		if (np)
 579			irq = irq_of_parse_and_map(np, i);
 580#endif
 581		combiner_cascade_irq(i, irq);
 582	}
 583}
 584
 585#ifdef CONFIG_OF
/*
 * DT init entry for the combiner (referenced from exynos4_dt_irq_match):
 * map the registers from the first "reg" entry and run combiner_init().
 * Returns 0 on success, -ENXIO if the registers cannot be mapped.
 */
int __init combiner_of_init(struct device_node *np, struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	combiner_init(combiner_base, np);

	return 0;
}
 600
/* DT match table handed to of_irq_init(): GIC plus the combiner */
static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ .compatible = "samsung,exynos4210-combiner",
			.data = combiner_of_init, },
	{},
};
 607#endif
 608
/*
 * EXYNOS4 irq init: bring up the GIC (and combiner) either from static
 * mappings (non-DT boot) or via of_irq_init() (DT boot).
 */
void __init exynos4_init_irq(void)
{
	unsigned int gic_bank_offset;

	/* per-CPU GIC bank stride differs on 4412 */
	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos4_dt_irq_match);
#endif

	if (!of_have_populated_dt())
		combiner_init(S5P_VA_COMBINER_BASE, NULL);

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
 632
/* EXYNOS5 irq init: DT-only — GIC and combiner come from of_irq_init() */
void __init exynos5_init_irq(void)
{
#ifdef CONFIG_OF
	of_irq_init(exynos4_dt_irq_match);
#endif
	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS5
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
 645
/* sysfs subsystem ("exynos-core") that exynos devices hang off */
struct bus_type exynos_subsys = {
	.name		= "exynos-core",
	.dev_name	= "exynos-core",
};

/* Anchor device registered by exynos_init() */
static struct device exynos4_dev = {
	.bus	= &exynos_subsys,
};
 654
/* Register the exynos-core subsystem early so devices can attach to it */
static int __init exynos_core_init(void)
{
	return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);
 660
 661#ifdef CONFIG_CACHE_L2X0
/*
 * Set up the PL310 L2 cache on EXYNOS4 (EXYNOS5250 has no PL310; bail).
 *
 * First try DT-based init (l2x0_of_init).  On success only the saved-regs
 * physical address needs publishing for resume.  Otherwise fall back to
 * static init: if the controller is not yet enabled, program latencies,
 * prefetch and power control (also recorded in l2x0_saved_regs so the
 * resume path can restore them), then call l2x0_init().
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		/* expose saved regs to the (physical-address) resume code */
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* only program the controller if it is not already enabled */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		/* flush so the physical-address resume path sees the values */
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
 713#endif
 714
/*
 * EXYNOS5250 L2 (integrated A15 cache) setup: with the cache disabled via
 * the SCTLR C bit, read the cp15 c9,c0,2 L2 control register, OR in
 * latency-related bits, write it back, then re-enable the cache.
 * NOTE(review): the exact meaning of bits 0/5/6/9 is per the Cortex-A15 /
 * Exynos5250 documentation — not derivable from this file alone.
 */
static int __init exynos5_l2_cache_init(void)
{
	unsigned int val;

	if (!soc_is_exynos5250())
		return 0;

	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
		     "bic %0, %0, #(1 << 2)\n"	/* cache disable */
		     "mcr p15, 0, %0, c1, c0, 0\n"
		     "mrc p15, 1, %0, c9, c0, 2\n"
		     : "=r"(val));

	val |= (1 << 9) | (1 << 5) | (2 << 6) | (2 << 0);

	asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));
	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
		     "orr %0, %0, #(1 << 2)\n"	/* cache enable */
		     "mcr p15, 0, %0, c1, c0, 0\n"
		     : : "r"(val));

	return 0;
}
early_initcall(exynos5_l2_cache_init);
 739
 740static int __init exynos_init(void)
 741{
 742	printk(KERN_INFO "EXYNOS: Initializing architecture\n");
 743
 744	return device_register(&exynos4_dev);
 745}
 746
/* uart registration process */

/*
 * Common ->init_uarts hook: mark every supplied uart config as having
 * fractional-value baud support, then register the uart devices with the
 * per-SoC resource table.
 * NOTE(review): 'no' is signed but compared against the u32 counter;
 * callers are presumably never passing a negative count — confirm.
 */
static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
		tcfg->has_fracval = 1;

	if (soc_is_exynos5250())
		s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
	else
		s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
}
 762
/* ioremapped base of the external-interrupt (EINT) GPIO bank registers */
static void __iomem *exynos_eint_base;

/* protects read-modify-write of the shared EINT mask/control registers */
static DEFINE_SPINLOCK(eint_lock);

/* linux irq number for each of EINT0..15, handed to the chained handler */
static unsigned int eint0_15_data[16];
 768
 769static inline int exynos4_irq_to_gpio(unsigned int irq)
 770{
 771	if (irq < IRQ_EINT(0))
 772		return -EINVAL;
 773
 774	irq -= IRQ_EINT(0);
 775	if (irq < 8)
 776		return EXYNOS4_GPX0(irq);
 777
 778	irq -= 8;
 779	if (irq < 8)
 780		return EXYNOS4_GPX1(irq);
 781
 782	irq -= 8;
 783	if (irq < 8)
 784		return EXYNOS4_GPX2(irq);
 785
 786	irq -= 8;
 787	if (irq < 8)
 788		return EXYNOS4_GPX3(irq);
 789
 790	return -EINVAL;
 791}
 792
 793static inline int exynos5_irq_to_gpio(unsigned int irq)
 794{
 795	if (irq < IRQ_EINT(0))
 796		return -EINVAL;
 797
 798	irq -= IRQ_EINT(0);
 799	if (irq < 8)
 800		return EXYNOS5_GPX0(irq);
 801
 802	irq -= 8;
 803	if (irq < 8)
 804		return EXYNOS5_GPX1(irq);
 805
 806	irq -= 8;
 807	if (irq < 8)
 808		return EXYNOS5_GPX2(irq);
 809
 810	irq -= 8;
 811	if (irq < 8)
 812		return EXYNOS5_GPX3(irq);
 813
 814	return -EINVAL;
 815}
 816
/* Parent (GIC) interrupt for each of EINT0..15 on EXYNOS4; indexed by
 * EINT number in exynos_init_irq_eint() */
static unsigned int exynos4_eint0_15_src_int[16] = {
	EXYNOS4_IRQ_EINT0,
	EXYNOS4_IRQ_EINT1,
	EXYNOS4_IRQ_EINT2,
	EXYNOS4_IRQ_EINT3,
	EXYNOS4_IRQ_EINT4,
	EXYNOS4_IRQ_EINT5,
	EXYNOS4_IRQ_EINT6,
	EXYNOS4_IRQ_EINT7,
	EXYNOS4_IRQ_EINT8,
	EXYNOS4_IRQ_EINT9,
	EXYNOS4_IRQ_EINT10,
	EXYNOS4_IRQ_EINT11,
	EXYNOS4_IRQ_EINT12,
	EXYNOS4_IRQ_EINT13,
	EXYNOS4_IRQ_EINT14,
	EXYNOS4_IRQ_EINT15,
};
 835
/* Parent (GIC) interrupt for each of EINT0..15 on EXYNOS5 */
static unsigned int exynos5_eint0_15_src_int[16] = {
	EXYNOS5_IRQ_EINT0,
	EXYNOS5_IRQ_EINT1,
	EXYNOS5_IRQ_EINT2,
	EXYNOS5_IRQ_EINT3,
	EXYNOS5_IRQ_EINT4,
	EXYNOS5_IRQ_EINT5,
	EXYNOS5_IRQ_EINT6,
	EXYNOS5_IRQ_EINT7,
	EXYNOS5_IRQ_EINT8,
	EXYNOS5_IRQ_EINT9,
	EXYNOS5_IRQ_EINT10,
	EXYNOS5_IRQ_EINT11,
	EXYNOS5_IRQ_EINT12,
	EXYNOS5_IRQ_EINT13,
	EXYNOS5_IRQ_EINT14,
	EXYNOS5_IRQ_EINT15,
};
 854static inline void exynos_irq_eint_mask(struct irq_data *data)
 855{
 856	u32 mask;
 857
 858	spin_lock(&eint_lock);
 859	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
 860	mask |= EINT_OFFSET_BIT(data->irq);
 861	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
 862	spin_unlock(&eint_lock);
 863}
 864
 865static void exynos_irq_eint_unmask(struct irq_data *data)
 866{
 867	u32 mask;
 868
 869	spin_lock(&eint_lock);
 870	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
 871	mask &= ~(EINT_OFFSET_BIT(data->irq));
 872	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
 873	spin_unlock(&eint_lock);
 874}
 875
 876static inline void exynos_irq_eint_ack(struct irq_data *data)
 877{
 878	__raw_writel(EINT_OFFSET_BIT(data->irq),
 879		     EINT_PEND(exynos_eint_base, data->irq));
 880}
 881
/* irq_chip ->irq_mask_ack: mask first, then clear pending */
static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}
 887
/*
 * irq_chip ->irq_set_type: translate a generic IRQ_TYPE_* into the S5P
 * trigger encoding, program the irq's 4-bit field in the EINT control
 * register, then switch the pad to its EINT special function.
 * Returns 0 on success, -EINVAL for unsupported types.
 */
static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d", type);
		return -EINVAL;
	}

	/* 8 irqs per control register, 4 bits each */
	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);

	/* route the pad to the EINT function (SFN 0xf) */
	if (soc_is_exynos5250())
		s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
	else
		s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));

	return 0;
}
 938
/* irq_chip for the external interrupts (EINT0..31) */
static struct irq_chip exynos_irq_eint = {
	.name		= "exynos-eint",
	.irq_mask	= exynos_irq_eint_mask,
	.irq_unmask	= exynos_irq_eint_unmask,
	.irq_mask_ack	= exynos_irq_eint_maskack,
	.irq_ack	= exynos_irq_eint_ack,
	.irq_set_type	= exynos_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
 950
/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask registers handle eight of them.
 */
static inline void exynos_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
	u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));

	/* ignore masked sources; only 8 bits are valid per register */
	status &= ~mask;
	status &= 0xff;

	/* dispatch pending sources, highest-numbered bit first */
	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}
 976
/* Chained handler for the shared EINT16..31 parent interrupt: demux both
 * 8-bit groups under chained_irq_enter/exit */
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	chained_irq_enter(chip, desc);
	exynos_irq_demux_eint(IRQ_EINT(16));
	exynos_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}
 985
/*
 * Chained handler for EINT0..15 (each has its own parent interrupt).
 * The handler data, set in exynos_init_irq_eint(), points at the linux
 * irq to dispatch.  The parent is masked/acked around the dispatch and
 * unmasked afterwards.
 */
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}
1002
/*
 * Set up the external interrupts: map the EINT GPIO bank, install the
 * exynos_irq_eint chip for EINT0..31, chain the shared EINT16..31 parent,
 * and chain each of EINT0..15 to its per-SoC parent interrupt.
 * Returns 0 on success, -ENOMEM if the register bank cannot be mapped.
 */
static int __init exynos_init_irq_eint(void)
{
	int irq;

	if (soc_is_exynos5250())
		exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
	else
		exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);

	if (exynos_eint_base == NULL) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENOMEM;
	}

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	/* EINT16..31 share one parent interrupt */
	irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);

	/* EINT0..15 each have a dedicated parent; hand the target linux
	 * irq to the chained handler via handler data */
	for (irq = 0 ; irq <= 15 ; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		if (soc_is_exynos5250()) {
			irq_set_handler_data(exynos5_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		} else {
			irq_set_handler_data(exynos4_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		}
	}

	return 0;
}
arch_initcall(exynos_init_irq_eint);