v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * arch/arm/mach-at91/pm.c
   4 * AT91 Power Management
   5 *
   6 * Copyright (C) 2005 David Brownell
   7 */
   8
   9#include <linux/genalloc.h>
  10#include <linux/io.h>
  11#include <linux/of_address.h>
  12#include <linux/of.h>
  13#include <linux/of_fdt.h>
  14#include <linux/of_platform.h>
  15#include <linux/platform_device.h>
  16#include <linux/parser.h>
  17#include <linux/suspend.h>
  18
  19#include <linux/clk.h>
  20#include <linux/clk/at91_pmc.h>
  21#include <linux/platform_data/atmel.h>
  22
  23#include <asm/cacheflush.h>
  24#include <asm/fncpy.h>
  25#include <asm/system_misc.h>
  26#include <asm/suspend.h>
  27
  28#include "generic.h"
  29#include "pm.h"
  30#include "sam_secure.h"
  31
  32#define BACKUP_DDR_PHY_CALIBRATION	(9)
  33
  34/**
  35 * struct at91_pm_bu - AT91 power management backup unit data structure
  36 * @suspended: true if suspended to backup mode
  37 * @reserved: reserved
  38 * @canary: canary data for memory checking after exit from backup mode
  39 * @resume: resume API
  40 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
  41 * of the memory
  42 */
  43struct at91_pm_bu {
  44	int suspended;
  45	unsigned long reserved;
  46	phys_addr_t canary;
  47	phys_addr_t resume;
  48	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
  49};
  50
  51/**
  52 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
  53 * @pswbu: power switch BU control registers
  54 */
  55struct at91_pm_sfrbu_regs {
  56	struct {
  57		u32 key;
  58		u32 ctrl;
  59		u32 state;
  60		u32 softsw;
  61	} pswbu;
  62};
  63
  64/**
  65 * enum at91_pm_eth_clk - Ethernet clock indexes
  66 * @AT91_PM_ETH_PCLK: pclk index
  67 * @AT91_PM_ETH_HCLK: hclk index
  68 * @AT91_PM_ETH_MAX_CLK: max index
  69 */
  70enum at91_pm_eth_clk {
  71	AT91_PM_ETH_PCLK,
  72	AT91_PM_ETH_HCLK,
  73	AT91_PM_ETH_MAX_CLK,
  74};
  75
  76/**
  77 * enum at91_pm_eth - Ethernet controller indexes
  78 * @AT91_PM_G_ETH: gigabit Ethernet controller index
  79 * @AT91_PM_E_ETH: megabit Ethernet controller index
  80 * @AT91_PM_MAX_ETH: max index
  81 */
  82enum at91_pm_eth {
  83	AT91_PM_G_ETH,
  84	AT91_PM_E_ETH,
  85	AT91_PM_MAX_ETH,
  86};
  87
  88/**
  89 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
  90 * @dev: Ethernet device
  91 * @np: Ethernet device node
  92 * @clks: Ethernet clocks
  93 * @modes: power management mode that this quirk applies to
  94 * @dns_modes: do not suspend modes: stop suspending if Ethernet is configured
  95 *	       as wakeup source but buggy and no other wakeup source is
  96 *	       available
  97 */
  98struct at91_pm_quirk_eth {
  99	struct device *dev;
 100	struct device_node *np;
 101	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
 102	u32 modes;
 103	u32 dns_modes;
 104};
 105
 106/**
 107 * struct at91_pm_quirks - AT91 PM quirks
 108 * @eth: Ethernet quirks
 109 */
 110struct at91_pm_quirks {
 111	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
 112};
 113
 114/**
 115 * struct at91_soc_pm - AT91 SoC power management data structure
 116 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
  117 * @config_pmc_ws: wakeup sources configuration function for PMC
  118 * @ws_ids: wakeup sources of_device_id array
 119 * @bu: backup unit mapped data (for backup mode)
 120 * @quirks: PM quirks
 121 * @data: PM data to be used on last phase of suspend
 122 * @sfrbu_regs: SFRBU registers mapping
 123 * @memcs: memory chip select
 124 */
 125struct at91_soc_pm {
 126	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
 127	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
 128	const struct of_device_id *ws_ids;
 129	struct at91_pm_bu *bu;
 130	struct at91_pm_quirks quirks;
 131	struct at91_pm_data data;
 132	struct at91_pm_sfrbu_regs sfrbu_regs;
 133	void *memcs;
 134};
 135
 136/**
  137 * enum at91_pm_iomaps - IOs that need to be mapped for different PM modes
 138 * @AT91_PM_IOMAP_SHDWC:	SHDWC controller
 139 * @AT91_PM_IOMAP_SFRBU:	SFRBU controller
 140 * @AT91_PM_IOMAP_ETHC:		Ethernet controller
 141 */
 142enum at91_pm_iomaps {
 143	AT91_PM_IOMAP_SHDWC,
 144	AT91_PM_IOMAP_SFRBU,
 145	AT91_PM_IOMAP_ETHC,
 146};
 147
 148#define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
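/*
 * For example, AT91_PM_IOMAP(SHDWC) expands to BIT(AT91_PM_IOMAP_SHDWC),
 * i.e. BIT(0), so an iomaps[] entry such as
 * AT91_PM_IOMAP(SHDWC) | AT91_PM_IOMAP(ETHC) marks a PM mode as needing
 * both the SHDWC and the Ethernet controller mapped before it can be used.
 */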
 149
 150static struct at91_soc_pm soc_pm = {
 151	.data = {
 152		.standby_mode = AT91_PM_STANDBY,
 153		.suspend_mode = AT91_PM_ULP0,
 154	},
 155};
 156
 157static const match_table_t pm_modes __initconst = {
 158	{ AT91_PM_STANDBY,	"standby" },
 159	{ AT91_PM_ULP0,		"ulp0" },
 160	{ AT91_PM_ULP0_FAST,    "ulp0-fast" },
 161	{ AT91_PM_ULP1,		"ulp1" },
 162	{ AT91_PM_BACKUP,	"backup" },
 163	{ -1, NULL },
 164};
 165
 166#define at91_ramc_read(id, field) \
 167	__raw_readl(soc_pm.data.ramc[id] + field)
 168
 169#define at91_ramc_write(id, field, value) \
 170	__raw_writel(value, soc_pm.data.ramc[id] + field)
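/*
 * For example, at91_ramc_read(0, AT91_DDRSDRC_LPR) expands to
 * __raw_readl(soc_pm.data.ramc[0] + AT91_DDRSDRC_LPR), i.e. a read of the
 * low-power register of the first RAM controller mapped by at91_dt_ramc().
 */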
 171
 172static int at91_pm_valid_state(suspend_state_t state)
 173{
 174	switch (state) {
 175		case PM_SUSPEND_ON:
 176		case PM_SUSPEND_STANDBY:
 177		case PM_SUSPEND_MEM:
 178			return 1;
 179
 180		default:
 181			return 0;
 182	}
 183}
 184
 185static int canary = 0xA5A5A5A5;
 186
 187struct wakeup_source_info {
 188	unsigned int pmc_fsmr_bit;
 189	unsigned int shdwc_mr_bit;
 190	bool set_polarity;
 191};
 192
 193static const struct wakeup_source_info ws_info[] = {
 194	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
 195	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
 196	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
 197	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
 198	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
 199	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
 200};
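/*
 * The ws_info[] entries above are referenced by the per-SoC *_ws_ids[]
 * tables below. For example, sama5d2_ws_ids[] maps "atmel,sama5d2-gem" to
 * ws_info[0]: when such a GEM node is a wakeup source and ULP1 is entered,
 * at91_pm_config_ws() sets AT91_PMC_FSTT(10) in the mode mask written to
 * AT91_PMC_FSMR and, because set_polarity is true, the same bit in the
 * polarity mask written to AT91_PMC_FSPR.
 */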
 201
 202static const struct of_device_id sama5d2_ws_ids[] = {
 203	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
 204	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
 205	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
 206	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
 207	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
 208	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
 209	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
 210	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
 211	{ /* sentinel */ }
 212};
 213
 214static const struct of_device_id sam9x60_ws_ids[] = {
 215	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
 216	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
 217	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
 218	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
 219	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
 220	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
 221	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
 222	{ /* sentinel */ }
 223};
 224
 225static const struct of_device_id sama7g5_ws_ids[] = {
 226	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
 227	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
 228	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
 229	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
 230	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
 231	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
 232	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
 233	{ /* sentinel */ }
 234};
 235
 236static const struct of_device_id sam9x7_ws_ids[] = {
 237	{ .compatible = "microchip,sam9x7-rtc",		.data = &ws_info[1] },
 238	{ .compatible = "microchip,sam9x7-rtt",		.data = &ws_info[4] },
 239	{ .compatible = "microchip,sam9x7-gem",		.data = &ws_info[5] },
 240	{ /* sentinel */ }
 241};
 242
 243static int at91_pm_config_ws(unsigned int pm_mode, bool set)
 244{
 245	const struct wakeup_source_info *wsi;
 246	const struct of_device_id *match;
 247	struct platform_device *pdev;
 248	struct device_node *np;
 249	unsigned int mode = 0, polarity = 0, val = 0;
 250
 251	if (pm_mode != AT91_PM_ULP1)
 252		return 0;
 253
 254	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
 255		return -EPERM;
 256
 257	if (!set) {
 258		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
 259		return 0;
 260	}
 261
 262	if (soc_pm.config_shdwc_ws)
 263		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
 264
 265	/* SHDWC.MR */
 266	val = readl(soc_pm.data.shdwc + 0x04);
 267
 268	/* Loop through defined wakeup sources. */
 269	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
 270		pdev = of_find_device_by_node(np);
 271		if (!pdev)
 272			continue;
 273
 274		if (device_may_wakeup(&pdev->dev)) {
 275			wsi = match->data;
 276
 277			/* Check if enabled on SHDWC. */
 278			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
 279				goto put_device;
 280
 281			mode |= wsi->pmc_fsmr_bit;
 282			if (wsi->set_polarity)
 283				polarity |= wsi->pmc_fsmr_bit;
 284		}
 285
 286put_device:
 287		put_device(&pdev->dev);
 288	}
 289
 290	if (mode) {
 291		if (soc_pm.config_pmc_ws)
 292			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
 293	} else {
 294		pr_err("AT91: PM: no ULP1 wakeup sources found!");
 295	}
 296
 297	return mode ? 0 : -EPERM;
 298}
 299
 300static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
 301					u32 *polarity)
 302{
 303	u32 val;
 304
 305	/* SHDWC.WUIR */
 306	val = readl(shdwc + 0x0c);
 307	*mode |= (val & 0x3ff);
 308	*polarity |= ((val >> 16) & 0x3ff);
 309
 310	return 0;
 311}
 312
 313static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
 314{
 315	writel(mode, pmc + AT91_PMC_FSMR);
 316	writel(polarity, pmc + AT91_PMC_FSPR);
 317
 318	return 0;
 319}
 320
 321static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
 322{
 323	writel(mode, pmc + AT91_PMC_FSMR);
 324
 325	return 0;
 326}
 327
 328static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
 329{
 330	struct platform_device *pdev;
 331
 332	/* Interface NA in DT. */
 333	if (!eth->np)
 334		return false;
 335
 336	/* No quirks for this interface and current suspend mode. */
 337	if (!(eth->modes & BIT(soc_pm.data.mode)))
 338		return false;
 339
 340	if (!eth->dev) {
 341		/* Driver not probed. */
 342		pdev = of_find_device_by_node(eth->np);
 343		if (!pdev)
 344			return false;
 345		/* put_device(eth->dev) is called at the end of suspend. */
 346		eth->dev = &pdev->dev;
 347	}
 348
 349	/* No quirks if device isn't a wakeup source. */
  350	if (!device_may_wakeup(eth->dev))
  351		return false;
  352
  353	return true;
 354}
 355
 356static int at91_pm_config_quirks(bool suspend)
 357{
 358	struct at91_pm_quirk_eth *eth;
 359	int i, j, ret, tmp;
 360
 361	/*
  362	 * Ethernet IPs whose device_node pointers are stored in
  363	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1
  364	 * or both due to a hardware bug. If they receive WoL packets while in
  365	 * ULP0 or ULP1, the IPs could stop working or the whole system could
  366	 * hang. We cannot handle this scenario in the Ethernet driver itself,
  367	 * as the driver is shared across multiple vendors, and only here, in
  368	 * this file, do we know whether we suspend to ULP0 or ULP1 mode. Thus
  369	 * handle these scenarios here, as quirks.
 370	 */
 371	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
 372		eth = &soc_pm.quirks.eth[i];
 373
 374		if (!at91_pm_eth_quirk_is_valid(eth))
 375			continue;
 376
 377		/*
  378		 * For modes in the dns_modes mask the system hangs if the quirk
  379		 * is not applied, but with the quirk applied the interface does
  380		 * not react to WoL events. Thus take care to avoid suspending if
  381		 * this interface is the only configured wakeup source.
 382		 */
 383		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
 384			int ws_count = 0;
 385#ifdef CONFIG_PM_SLEEP
 386			struct wakeup_source *ws;
 387
 388			for_each_wakeup_source(ws) {
 389				if (ws->dev == eth->dev)
 390					continue;
 391
 392				ws_count++;
 393				break;
 394			}
 395#endif
 396
 397			/*
  398			 * Checking !ws_count is good for all platforms with issues
  399			 * even when both G_ETH and E_ETH are available, as dns_modes
  400			 * is populated only on the G_ETH interface.
 401			 */
 402			if (!ws_count) {
 403				pr_err("AT91: PM: Ethernet cannot resume from WoL!");
 404				ret = -EPERM;
 405				put_device(eth->dev);
 406				eth->dev = NULL;
 407				/* No need to revert clock settings for this eth. */
 408				i--;
 409				goto clk_unconfigure;
 410			}
 411		}
 412
 413		if (suspend) {
 414			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
 415		} else {
 416			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
 417						      eth->clks);
 418			if (ret)
 419				goto clk_unconfigure;
 420			/*
 421			 * Release the reference to eth->dev taken in
 422			 * at91_pm_eth_quirk_is_valid().
 423			 */
 424			put_device(eth->dev);
 425			eth->dev = NULL;
 426		}
 427	}
 428
 429	return 0;
 430
 431clk_unconfigure:
 432	/*
 433	 * In case of resume we reach this point if clk_prepare_enable() failed.
  434	 * We don't want to revert the previous clk_prepare_enable() for the
 435	 * other IP.
 436	 */
 437	for (j = i; j >= 0; j--) {
 438		eth = &soc_pm.quirks.eth[j];
 439		if (suspend) {
 440			if (!at91_pm_eth_quirk_is_valid(eth))
 441				continue;
 442
 443			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
 444			if (tmp) {
 445				pr_err("AT91: PM: failed to enable %s clocks\n",
 446				       j == AT91_PM_G_ETH ? "geth" : "eth");
 447			}
 448		}
 449
 450		/*
 451		 * Release the reference to eth->dev taken in
 452		 * at91_pm_eth_quirk_is_valid().
 453		 */
 454		put_device(eth->dev);
 455		eth->dev = NULL;
 456	}
 457
 458	return ret;
 459}
 460
 461/*
 462 * Called after processes are frozen, but before we shutdown devices.
 463 */
 464static int at91_pm_begin(suspend_state_t state)
 465{
 466	int ret;
 467
 468	switch (state) {
 469	case PM_SUSPEND_MEM:
 470		soc_pm.data.mode = soc_pm.data.suspend_mode;
 471		break;
 472
 473	case PM_SUSPEND_STANDBY:
 474		soc_pm.data.mode = soc_pm.data.standby_mode;
 475		break;
 476
 477	default:
 478		soc_pm.data.mode = -1;
 479	}
 480
 481	ret = at91_pm_config_ws(soc_pm.data.mode, true);
 482	if (ret)
 483		return ret;
 484
 485	if (soc_pm.data.mode == AT91_PM_BACKUP)
 486		soc_pm.bu->suspended = 1;
 487	else if (soc_pm.bu)
 488		soc_pm.bu->suspended = 0;
 489
 490	return 0;
 491}
 492
 493/*
 494 * Verify that all the clocks are correct before entering
 495 * slow-clock mode.
 496 */
 497static int at91_pm_verify_clocks(void)
 498{
 499	unsigned long scsr;
 500	int i;
 501
 502	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
 503
 504	/* USB must not be using PLLB */
 505	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
 506		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
 507		return 0;
 508	}
 509
 510	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
 511	for (i = 0; i < 4; i++) {
 512		u32 css;
 513
 514		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
 515			continue;
 516		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
 517		if (css != AT91_PMC_CSS_SLOW) {
 518			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
 519			return 0;
 520		}
 521	}
 522
 523	return 1;
 524}
 525
 526/*
 527 * Call this from platform driver suspend() to see how deeply to suspend.
 528 * For example, some controllers (like OHCI) need one of the PLL clocks
 529 * in order to act as a wakeup source, and those are not available when
 530 * going into slow clock mode.
 531 *
 532 * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
 533 * the very same problem (but not using at91 main_clk), and it'd be better
 534 * to add one generic API rather than lots of platform-specific ones.
 535 */
 536int at91_suspend_entering_slow_clock(void)
 537{
 538	return (soc_pm.data.mode >= AT91_PM_ULP0);
 539}
 540EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
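/*
 * Usage sketch for at91_suspend_entering_slow_clock(), as described in the
 * comment above. This is hypothetical driver code, not part of pm.c: a
 * controller that needs a PLL-derived clock to act as a wakeup source can
 * query the helper from its suspend() callback and fall back to a full
 * power-down when the system is heading into a slow-clock mode.
 * foo_arm_wakeup() and foo_power_down() are illustrative placeholders.
 */
static inline int foo_arm_wakeup(struct device *dev) { return 0; }
static inline int foo_power_down(struct device *dev) { return 0; }

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* Wakeup IRQs relying on a PLL work only outside slow-clock modes. */
	if (device_may_wakeup(dev) && !at91_suspend_entering_slow_clock())
		return foo_arm_wakeup(dev);

	return foo_power_down(dev);
}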
 541
 542static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
 543extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
 544extern u32 at91_pm_suspend_in_sram_sz;
 545
 546static int at91_suspend_finish(unsigned long val)
 547{
 548	unsigned char modified_gray_code[] = {
 549		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
 550		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
 551		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
 552		0x10, 0x11,
 553	};
 554	unsigned int tmp, index;
 555	int i;
 556
 557	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
 558		/*
 559		 * Bootloader will perform DDR recalibration and will try to
 560		 * restore the ZQ0SR0 with the value saved here. But the
 561		 * calibration is buggy and restoring some values from ZQ0SR0
 562		 * is forbidden and risky thus we need to provide processed
 563		 * values for these (modified gray code values).
 564		 */
 565		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
 566
 567		/* Store pull-down output impedance select. */
 568		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
 569		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
 570
 571		/* Store pull-up output impedance select. */
 572		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
 573		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
 574
 575		/* Store pull-down on-die termination impedance select. */
 576		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
 577		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
 578
 579		/* Store pull-up on-die termination impedance select. */
 580		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
 581		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
 582
 583		/*
  584		 * The first 8 words of memory might get corrupted in the process
  585		 * of DDR PHY recalibration; they are saved here in securam and
  586		 * will be restored later, after recalibration, by the bootloader.
 587		 */
 588		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
 589			soc_pm.bu->ddr_phy_calibration[i] =
 590				*((unsigned int *)soc_pm.memcs + (i - 1));
 591	}
 592
 593	flush_cache_all();
 594	outer_disable();
 595
 596	at91_suspend_sram_fn(&soc_pm.data);
 597
 598	return 0;
 599}
 600
 601/**
 602 * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
 603 * to automatic/hardware mode.
 604 *
 605 * The Backup Unit Power Switch can be managed either by software or hardware.
 606 * Enabling hardware mode allows the automatic transition of power between
 607 * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
 608 * availability of these power sources.
 609 *
 610 * If the Backup Unit Power Switch is already in automatic mode, no action is
 611 * required. If it is in software-controlled mode, it is switched to automatic
 612 * mode to enhance safety and eliminate the need for toggling between power
 613 * sources.
 614 */
 615static void at91_pm_switch_ba_to_auto(void)
 616{
 617	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
 618	unsigned int val;
 619
 620	/* Just for safety. */
 621	if (!soc_pm.data.sfrbu)
 622		return;
 623
 624	val = readl(soc_pm.data.sfrbu + offset);
 625
 626	/* Already on auto/hardware. */
 627	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
 628		return;
 629
 630	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
 631	val |= soc_pm.sfrbu_regs.pswbu.key;
 632	writel(val, soc_pm.data.sfrbu + offset);
 633}
 634
 635static void at91_pm_suspend(suspend_state_t state)
 636{
 637	if (soc_pm.data.mode == AT91_PM_BACKUP) {
 638		at91_pm_switch_ba_to_auto();
 639
 640		cpu_suspend(0, at91_suspend_finish);
 641
 642		/* The SRAM is lost between suspend cycles */
 643		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
 644					     &at91_pm_suspend_in_sram,
 645					     at91_pm_suspend_in_sram_sz);
 646	} else {
 647		at91_suspend_finish(0);
 648	}
 649
 650	outer_resume();
 651}
 652
 653/*
 654 * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
 655 * event sources; and reduces DRAM power.  But otherwise it's identical to
 656 * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
 657 *
  658 * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
  659 * suspend more deeply; the master clock switches to clk32k and the main
  660 * oscillator is turned off.
  661 *
  662 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
 663 */
 664static int at91_pm_enter(suspend_state_t state)
 665{
 666	int ret;
 667
 668	ret = at91_pm_config_quirks(true);
 669	if (ret)
 670		return ret;
 671
 672	switch (state) {
 673	case PM_SUSPEND_MEM:
 674	case PM_SUSPEND_STANDBY:
 675		/*
 676		 * Ensure that clocks are in a valid state.
 677		 */
 678		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
 679		    !at91_pm_verify_clocks())
 680			goto error;
 681
 682		at91_pm_suspend(state);
 683
 684		break;
 685
 686	case PM_SUSPEND_ON:
 687		cpu_do_idle();
 688		break;
 689
 690	default:
 691		pr_debug("AT91: PM - bogus suspend state %d\n", state);
 692		goto error;
 693	}
 694
 695error:
 696	at91_pm_config_quirks(false);
 697	return 0;
 698}
 699
 700/*
 701 * Called right prior to thawing processes.
 702 */
 703static void at91_pm_end(void)
 704{
 705	at91_pm_config_ws(soc_pm.data.mode, false);
 706}
 707
 708
 709static const struct platform_suspend_ops at91_pm_ops = {
 710	.valid	= at91_pm_valid_state,
 711	.begin	= at91_pm_begin,
 712	.enter	= at91_pm_enter,
 713	.end	= at91_pm_end,
 714};
 715
 716static struct platform_device at91_cpuidle_device = {
 717	.name = "cpuidle-at91",
 718};
 719
 720/*
 721 * The AT91RM9200 goes into self-refresh mode with this command, and will
 722 * terminate self-refresh automatically on the next SDRAM access.
 723 *
 724 * Self-refresh mode is exited as soon as a memory access is made, but we don't
 725 * know for sure when that happens. However, we need to restore the low-power
 726 * mode if it was enabled before going idle. Restoring low-power mode while
 727 * still in self-refresh is "not recommended", but seems to work.
 728 */
 729static void at91rm9200_standby(void)
 730{
 731	asm volatile(
 732		"b    1f\n\t"
 733		".align    5\n\t"
 734		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
 735		"    str    %2, [%1, %3]\n\t"
 736		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
 737		:
 738		: "r" (0), "r" (soc_pm.data.ramc[0]),
 739		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
 740}
 741
  742/* We manage both DDRAM/SDRAM controllers, so we need more than one value to
 743 * remember.
 744 */
 745static void at91_ddr_standby(void)
 746{
 747	/* Those two values allow us to delay self-refresh activation
 748	 * to the maximum. */
 749	u32 lpr0, lpr1 = 0;
 750	u32 mdr, saved_mdr0, saved_mdr1 = 0;
 751	u32 saved_lpr0, saved_lpr1 = 0;
 752
 753	/* LPDDR1 --> force DDR2 mode during self-refresh */
 754	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
 755	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
 756		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
 757		mdr |= AT91_DDRSDRC_MD_DDR2;
 758		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
 759	}
 760
 761	if (soc_pm.data.ramc[1]) {
 762		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
 763		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
 764		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
 765		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
 766		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
 767			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
 768			mdr |= AT91_DDRSDRC_MD_DDR2;
 769			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
 770		}
 771	}
 772
 773	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
 774	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
 775	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
 776
 777	/* self-refresh mode now */
 778	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
 779	if (soc_pm.data.ramc[1])
 780		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
 781
 782	cpu_do_idle();
 783
 784	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
 785	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
 786	if (soc_pm.data.ramc[1]) {
 787		at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr1);
 788		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 789	}
 790}
 791
 792static void sama5d3_ddr_standby(void)
 793{
 794	u32 lpr0;
 795	u32 saved_lpr0;
 796
 797	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
 798	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
 799	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
 800
 801	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
 802
 803	cpu_do_idle();
 804
 805	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
 806}
 807
  808/* We manage both DDRAM/SDRAM controllers, so we need more than one value to
 809 * remember.
 810 */
 811static void at91sam9_sdram_standby(void)
 812{
 813	u32 lpr0, lpr1 = 0;
 814	u32 saved_lpr0, saved_lpr1 = 0;
 815
 816	if (soc_pm.data.ramc[1]) {
 817		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
 818		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
 819		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
 820	}
 821
 822	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
 823	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
 824	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
 825
 826	/* self-refresh mode now */
 827	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
 828	if (soc_pm.data.ramc[1])
 829		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
 830
 831	cpu_do_idle();
 832
 833	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
 834	if (soc_pm.data.ramc[1])
 835		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 836}
 837
 838static void sama7g5_standby(void)
 839{
 840	int pwrtmg, ratio;
 841
 842	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
 843	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
 844
 845	/*
  846	 * Place the RAM into self-refresh after a maximum number of idle
  847	 * clocks. The maximum number of idle clocks is configured by the
  848	 * bootloader in UDDRC_PWRMGT.SELFREF_TO_X32.
 849	 */
 850	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
 851	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
 852	/* Divide CPU clock by 16. */
 853	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
 854
 855	cpu_do_idle();
 856
 857	/* Restore previous configuration. */
 858	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
 859	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
 860}
 861
 862struct ramc_info {
 863	void (*idle)(void);
 864	unsigned int memctrl;
 865};
 866
 867static const struct ramc_info ramc_infos[] __initconst = {
 868	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
 869	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
 870	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
 871	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
 872	{ .idle = sama7g5_standby, },
 873};
 874
 875static const struct of_device_id ramc_ids[] __initconst = {
 876	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
 877	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
 878	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
 879	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
 880	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
 881	{ /*sentinel*/ }
 882};
 883
 884static const struct of_device_id ramc_phy_ids[] __initconst = {
 885	{ .compatible = "microchip,sama7g5-ddr3phy", },
 886	{ /* Sentinel. */ },
 887};
 888
 889static __init int at91_dt_ramc(bool phy_mandatory)
 890{
 891	struct device_node *np;
 892	const struct of_device_id *of_id;
 893	int idx = 0;
 894	void *standby = NULL;
 895	const struct ramc_info *ramc;
 896	int ret;
 897
 898	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
 899		soc_pm.data.ramc[idx] = of_iomap(np, 0);
 900		if (!soc_pm.data.ramc[idx]) {
 901			pr_err("unable to map ramc[%d] cpu registers\n", idx);
 902			ret = -ENOMEM;
 903			of_node_put(np);
 904			goto unmap_ramc;
 905		}
 906
 907		ramc = of_id->data;
 908		if (ramc) {
 909			if (!standby)
 910				standby = ramc->idle;
 911			soc_pm.data.memctrl = ramc->memctrl;
 912		}
 913
 914		idx++;
 915	}
 916
 917	if (!idx) {
 918		pr_err("unable to find compatible ram controller node in dtb\n");
 919		ret = -ENODEV;
 920		goto unmap_ramc;
 921	}
 922
  923	/* Look up the DDR PHY node, if any. */
 924	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
 925		soc_pm.data.ramc_phy = of_iomap(np, 0);
 926		if (!soc_pm.data.ramc_phy) {
 927			pr_err("unable to map ramc phy cpu registers\n");
 928			ret = -ENOMEM;
 929			of_node_put(np);
 930			goto unmap_ramc;
 931		}
 932	}
 933
 934	if (phy_mandatory && !soc_pm.data.ramc_phy) {
 935		pr_err("DDR PHY is mandatory!\n");
 936		ret = -ENODEV;
 937		goto unmap_ramc;
 938	}
 939
 940	if (!standby) {
 941		pr_warn("ramc no standby function available\n");
 942		return 0;
 943	}
 944
 945	at91_cpuidle_device.dev.platform_data = standby;
 946
 947	return 0;
 948
 949unmap_ramc:
 950	while (idx)
 951		iounmap(soc_pm.data.ramc[--idx]);
 952
 953	return ret;
 954}
 955
 956static void at91rm9200_idle(void)
 957{
 958	/*
 959	 * Disable the processor clock.  The processor will be automatically
 960	 * re-enabled by an interrupt or by a reset.
 961	 */
 962	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
 963}
 964
 965static void at91sam9_idle(void)
 966{
 967	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
 968	cpu_do_idle();
 969}
 970
 971static void __init at91_pm_sram_init(void)
 972{
 973	struct gen_pool *sram_pool;
 974	phys_addr_t sram_pbase;
 975	unsigned long sram_base;
 976	struct device_node *node;
 977	struct platform_device *pdev = NULL;
 978
 979	for_each_compatible_node(node, NULL, "mmio-sram") {
 980		pdev = of_find_device_by_node(node);
 981		if (pdev) {
 982			of_node_put(node);
 983			break;
 984		}
 985	}
 986
 987	if (!pdev) {
 988		pr_warn("%s: failed to find sram device!\n", __func__);
 989		return;
 990	}
 991
 992	sram_pool = gen_pool_get(&pdev->dev, NULL);
 993	if (!sram_pool) {
 994		pr_warn("%s: sram pool unavailable!\n", __func__);
 995		goto out_put_device;
 996	}
 997
 998	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
 999	if (!sram_base) {
1000		pr_warn("%s: unable to alloc sram!\n", __func__);
1001		goto out_put_device;
1002	}
1003
1004	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
1005	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
1006					at91_pm_suspend_in_sram_sz, false);
1007	if (!at91_suspend_sram_fn) {
1008		pr_warn("SRAM: Could not map\n");
1009		goto out_put_device;
1010	}
1011
1012	/* Copy the pm suspend handler to SRAM */
1013	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
1014			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
1015	return;
1016
1017out_put_device:
1018	put_device(&pdev->dev);
1019	return;
1020}
1021
1022static bool __init at91_is_pm_mode_active(int pm_mode)
1023{
1024	return (soc_pm.data.standby_mode == pm_mode ||
1025		soc_pm.data.suspend_mode == pm_mode);
1026}
1027
1028static int __init at91_pm_backup_scan_memcs(unsigned long node,
1029					    const char *uname, int depth,
1030					    void *data)
1031{
1032	const char *type;
1033	const __be32 *reg;
1034	int *located = data;
1035	int size;
1036
1037	/* Memory node already located. */
1038	if (*located)
1039		return 0;
1040
1041	type = of_get_flat_dt_prop(node, "device_type", NULL);
1042
1043	/* We are scanning "memory" nodes only. */
1044	if (!type || strcmp(type, "memory"))
1045		return 0;
1046
1047	reg = of_get_flat_dt_prop(node, "reg", &size);
1048	if (reg) {
1049		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1050		*located = 1;
1051	}
1052
1053	return 0;
1054}
1055
1056static int __init at91_pm_backup_init(void)
1057{
1058	struct gen_pool *sram_pool;
1059	struct device_node *np;
1060	struct platform_device *pdev;
1061	int ret = -ENODEV, located = 0;
1062
1063	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1064	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
1065		return -EPERM;
1066
1067	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1068		return 0;
1069
1070	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1071	if (!np)
1072		return ret;
1073
1074	pdev = of_find_device_by_node(np);
1075	of_node_put(np);
1076	if (!pdev) {
1077		pr_warn("%s: failed to find securam device!\n", __func__);
1078		return ret;
1079	}
1080
1081	sram_pool = gen_pool_get(&pdev->dev, NULL);
1082	if (!sram_pool) {
1083		pr_warn("%s: securam pool unavailable!\n", __func__);
1084		goto securam_fail;
1085	}
1086
1087	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1088	if (!soc_pm.bu) {
1089		pr_warn("%s: unable to alloc securam!\n", __func__);
1090		ret = -ENOMEM;
1091		goto securam_fail;
1092	}
1093
1094	soc_pm.bu->suspended = 0;
1095	soc_pm.bu->canary = __pa_symbol(&canary);
1096	soc_pm.bu->resume = __pa_symbol(cpu_resume);
1097	if (soc_pm.data.ramc_phy) {
1098		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1099		if (!located)
1100			goto securam_fail;
1101	}
1102
1103	return 0;
1104
1105securam_fail:
1106	put_device(&pdev->dev);
1107	return ret;
1108}
1109
1110static void __init at91_pm_secure_init(void)
1111{
1112	int suspend_mode;
1113	struct arm_smccc_res res;
1114
1115	suspend_mode = soc_pm.data.suspend_mode;
1116
1117	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1118			     suspend_mode, 0);
1119	if (res.a0 == 0) {
1120		pr_info("AT91: Secure PM: suspend mode set to %s\n",
1121			pm_modes[suspend_mode].pattern);
1122		soc_pm.data.mode = suspend_mode;
1123		return;
1124	}
1125
1126	pr_warn("AT91: Secure PM: %s mode not supported !\n",
1127		pm_modes[suspend_mode].pattern);
1128
1129	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1130	if (res.a0 == 0) {
1131		pr_warn("AT91: Secure PM: failed to get default mode\n");
1132		soc_pm.data.mode = -1;
1133		return;
1134	}
1135
1136	pr_info("AT91: Secure PM: using default suspend mode %s\n",
1137		pm_modes[suspend_mode].pattern);
1138
1139	soc_pm.data.suspend_mode = res.a1;
1140	soc_pm.data.mode = soc_pm.data.suspend_mode;
1141}
1142static const struct of_device_id atmel_shdwc_ids[] = {
1143	{ .compatible = "atmel,sama5d2-shdwc" },
1144	{ .compatible = "microchip,sam9x60-shdwc" },
1145	{ .compatible = "microchip,sama7g5-shdwc" },
1146	{ /* sentinel. */ }
1147};
1148
1149static const struct of_device_id gmac_ids[] __initconst = {
1150	{ .compatible = "atmel,sama5d3-gem" },
1151	{ .compatible = "atmel,sama5d2-gem" },
1152	{ .compatible = "atmel,sama5d29-gem" },
1153	{ .compatible = "microchip,sama7g5-gem" },
1154	{ },
1155};
1156
1157static const struct of_device_id emac_ids[] __initconst = {
1158	{ .compatible = "atmel,sama5d3-macb" },
1159	{ .compatible = "microchip,sama7g5-emac" },
1160	{ },
1161};
1162
1163/*
1164 * Replaces _mode_to_replace with a supported mode that doesn't depend
 1165 * on the controller pointed to by _map_bitmask.
1166 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
1167 * PM mode
1168 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
1169 * controller represented by _map_bitmask, _mode_to_replace needs to be
1170 * updated
 1171 * @_mode_to_replace: standby_mode or suspend_mode that needs to be
1172 * updated
1173 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
1174 * to avoid having standby_mode and suspend_mode set with the same AT91
1175 * PM mode
1176 */
1177#define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
1178			     _mode_to_check)				\
1179	do {								\
1180		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
1181			int _mode_to_use, _mode_complementary;		\
1182			/* Use ULP0 if it doesn't need _map_bitmask. */	\
1183			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1184				_mode_to_use = AT91_PM_ULP0;		\
1185				_mode_complementary = AT91_PM_STANDBY;	\
1186			} else {					\
1187				_mode_to_use = AT91_PM_STANDBY;		\
1188				_mode_complementary = AT91_PM_STANDBY;	\
1189			}						\
1190									\
1191			if ((_mode_to_check) != _mode_to_use)		\
1192				(_mode_to_replace) = _mode_to_use;	\
1193			else						\
1194				(_mode_to_replace) = _mode_complementary;\
1195		}							\
1196	} while (0)
1197
1198/*
1199 * Replaces standby and suspend modes with default supported modes:
1200 * ULP0 and STANDBY.
1201 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1202 * flags
1203 * @_map: controller specific name; standby and suspend mode need to be
1204 * replaced in order to not depend on this controller
1205 */
1206#define AT91_PM_REPLACE_MODES(_maps, _map)				\
1207	do {								\
1208		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1209				     (soc_pm.data.standby_mode),	\
1210				     (soc_pm.data.suspend_mode));	\
1211		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1212				     (soc_pm.data.suspend_mode),	\
1213				     (soc_pm.data.standby_mode));	\
1214	} while (0)
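/*
 * Worked example, using the sama5d2 iomaps[] table defined further below:
 * with standby_mode == AT91_PM_STANDBY and suspend_mode == AT91_PM_ULP1, a
 * missing SHDWC node makes at91_pm_modes_init() call
 * AT91_PM_REPLACE_MODES(maps, SHDWC). The standby entry has no SHDWC
 * dependency, so it is left alone; the ULP1 entry does, and since
 * maps[AT91_PM_ULP0] does not need SHDWC and ULP0 differs from the standby
 * mode, suspend_mode falls back to AT91_PM_ULP0.
 */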
1215
1216static int __init at91_pm_get_eth_clks(struct device_node *np,
1217				       struct clk_bulk_data *clks)
1218{
1219	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1220	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1221		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1222
1223	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1224	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1225		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1226
1227	return 0;
1228}
1229
1230static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1231{
1232	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1233	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1234}
1235
1236static void __init at91_pm_modes_init(const u32 *maps, int len)
1237{
1238	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1239	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1240	struct device_node *np;
1241	int ret;
1242
1243	ret = at91_pm_backup_init();
1244	if (ret) {
1245		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1246			soc_pm.data.standby_mode = AT91_PM_ULP0;
1247		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1248			soc_pm.data.suspend_mode = AT91_PM_ULP0;
1249	}
1250
1251	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1252	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1253		np = of_find_matching_node(NULL, atmel_shdwc_ids);
1254		if (!np) {
1255			pr_warn("%s: failed to find shdwc!\n", __func__);
1256			AT91_PM_REPLACE_MODES(maps, SHDWC);
1257		} else {
1258			soc_pm.data.shdwc = of_iomap(np, 0);
1259			of_node_put(np);
1260		}
1261	}
1262
1263	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1264	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1265		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1266		if (!np) {
1267			pr_warn("%s: failed to find sfrbu!\n", __func__);
1268			AT91_PM_REPLACE_MODES(maps, SFRBU);
1269		} else {
1270			soc_pm.data.sfrbu = of_iomap(np, 0);
1271			of_node_put(np);
1272		}
1273	}
1274
1275	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1276	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
1277	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1278	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1279	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1280		np = of_find_matching_node(NULL, gmac_ids);
1281		if (!np) {
1282			np = of_find_matching_node(NULL, emac_ids);
1283			if (np)
1284				goto get_emac_clks;
1285			AT91_PM_REPLACE_MODES(maps, ETHC);
1286			goto unmap_unused_nodes;
1287		} else {
1288			gmac->np = np;
1289			at91_pm_get_eth_clks(np, gmac->clks);
1290		}
1291
1292		np = of_find_matching_node(NULL, emac_ids);
1293		if (!np) {
1294			if (at91_pm_eth_clks_empty(gmac->clks))
1295				AT91_PM_REPLACE_MODES(maps, ETHC);
1296		} else {
1297get_emac_clks:
1298			emac->np = np;
1299			ret = at91_pm_get_eth_clks(np, emac->clks);
1300			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1301				of_node_put(gmac->np);
1302				of_node_put(emac->np);
1303				gmac->np = NULL;
1304				emac->np = NULL;
1305			}
1306		}
1307	}
1308
1309unmap_unused_nodes:
 1310	/* Unmap everything that is no longer needed. */
1311	if (soc_pm.data.shdwc &&
1312	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1313	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1314		iounmap(soc_pm.data.shdwc);
1315		soc_pm.data.shdwc = NULL;
1316	}
1317
1318	if (soc_pm.data.sfrbu &&
1319	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1320	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1321		iounmap(soc_pm.data.sfrbu);
1322		soc_pm.data.sfrbu = NULL;
1323	}
1324
1325	return;
1326}
1327
1328struct pmc_info {
1329	unsigned long uhp_udp_mask;
1330	unsigned long mckr;
1331	unsigned long version;
1332};
1333
1334static const struct pmc_info pmc_infos[] __initconst = {
1335	{
1336		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1337		.mckr = 0x30,
1338		.version = AT91_PMC_V1,
1339	},
1340
1341	{
1342		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1343		.mckr = 0x30,
1344		.version = AT91_PMC_V1,
1345	},
1346	{
1347		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
1348		.mckr = 0x30,
1349		.version = AT91_PMC_V1,
1350	},
1351	{	.uhp_udp_mask = 0,
1352		.mckr = 0x30,
1353		.version = AT91_PMC_V1,
1354	},
1355	{
1356		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1357		.mckr = 0x28,
1358		.version = AT91_PMC_V2,
1359	},
1360	{
1361		.mckr = 0x28,
1362		.version = AT91_PMC_V2,
1363	},
1364
1365};
1366
1367static const struct of_device_id atmel_pmc_ids[] __initconst = {
1368	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1369	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1370	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1371	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1372	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1373	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1374	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1375	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1376	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1377	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1378	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1379	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1380	{ .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
1381	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1382	{ /* sentinel */ },
1383};
1384
1385static void __init at91_pm_modes_validate(const int *modes, int len)
1386{
1387	u8 i, standby = 0, suspend = 0;
1388	int mode;
1389
1390	for (i = 0; i < len; i++) {
1391		if (standby && suspend)
1392			break;
1393
1394		if (modes[i] == soc_pm.data.standby_mode && !standby) {
1395			standby = 1;
1396			continue;
1397		}
1398
1399		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1400			suspend = 1;
1401			continue;
1402		}
1403	}
1404
1405	if (!standby) {
1406		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1407			mode = AT91_PM_ULP0;
1408		else
1409			mode = AT91_PM_STANDBY;
1410
1411		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1412			pm_modes[soc_pm.data.standby_mode].pattern,
1413			pm_modes[mode].pattern);
1414		soc_pm.data.standby_mode = mode;
1415	}
1416
1417	if (!suspend) {
1418		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1419			mode = AT91_PM_STANDBY;
1420		else
1421			mode = AT91_PM_ULP0;
1422
1423		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1424			pm_modes[soc_pm.data.suspend_mode].pattern,
1425			pm_modes[mode].pattern);
1426		soc_pm.data.suspend_mode = mode;
1427	}
1428}
1429
1430static void __init at91_pm_init(void (*pm_idle)(void))
1431{
1432	struct device_node *pmc_np;
1433	const struct of_device_id *of_id;
1434	const struct pmc_info *pmc;
1435
1436	if (at91_cpuidle_device.dev.platform_data)
1437		platform_device_register(&at91_cpuidle_device);
1438
1439	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1440	soc_pm.data.pmc = of_iomap(pmc_np, 0);
1441	of_node_put(pmc_np);
1442	if (!soc_pm.data.pmc) {
1443		pr_err("AT91: PM not supported, PMC not found\n");
1444		return;
1445	}
1446
1447	pmc = of_id->data;
1448	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1449	soc_pm.data.pmc_mckr_offset = pmc->mckr;
1450	soc_pm.data.pmc_version = pmc->version;
1451
1452	if (pm_idle)
1453		arm_pm_idle = pm_idle;
1454
1455	at91_pm_sram_init();
1456
1457	if (at91_suspend_sram_fn) {
1458		suspend_set_ops(&at91_pm_ops);
1459		pr_info("AT91: PM: standby: %s, suspend: %s\n",
1460			pm_modes[soc_pm.data.standby_mode].pattern,
1461			pm_modes[soc_pm.data.suspend_mode].pattern);
1462	} else {
1463		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1464	}
1465}
1466
1467void __init at91rm9200_pm_init(void)
1468{
1469	int ret;
1470
1471	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1472		return;
1473
1474	/*
 1475	 * Force STANDBY and ULP0 modes to avoid calling
 1476	 * at91_pm_modes_validate(), which may increase boot time.
 1477	 * The platform supports only STANDBY and ULP0 modes anyway.
1478	 */
1479	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1480	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1481
1482	ret = at91_dt_ramc(false);
1483	if (ret)
1484		return;
1485
1486	/*
1487	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1488	 */
1489	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1490
1491	at91_pm_init(at91rm9200_idle);
1492}
1493
1494void __init sam9x60_pm_init(void)
1495{
1496	static const int modes[] __initconst = {
1497		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1498	};
1499	static const int iomaps[] __initconst = {
1500		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC),
1501	};
1502	int ret;
1503
1504	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1505		return;
1506
1507	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1508	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1509	ret = at91_dt_ramc(false);
1510	if (ret)
1511		return;
1512
1513	at91_pm_init(NULL);
1514
1515	soc_pm.ws_ids = sam9x60_ws_ids;
1516	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1517}
1518
1519void __init sam9x7_pm_init(void)
1520{
1521	static const int modes[] __initconst = {
1522		AT91_PM_STANDBY, AT91_PM_ULP0,
1523	};
1524	int ret;
1525
1526	if (!IS_ENABLED(CONFIG_SOC_SAM9X7))
1527		return;
1528
1529	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1530	ret = at91_dt_ramc(false);
1531	if (ret)
1532		return;
1533
1534	at91_pm_init(NULL);
1535
1536	soc_pm.ws_ids = sam9x7_ws_ids;
1537	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1538}
1539
1540void __init at91sam9_pm_init(void)
1541{
1542	int ret;
1543
1544	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1545		return;
1546
1547	/*
 1548	 * Force STANDBY and ULP0 modes to avoid calling
 1549	 * at91_pm_modes_validate(), which may increase boot time.
 1550	 * The platform supports only STANDBY and ULP0 modes anyway.
1551	 */
1552	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1553	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1554
1555	ret = at91_dt_ramc(false);
1556	if (ret)
1557		return;
1558
1559	at91_pm_init(at91sam9_idle);
1560}
1561
1562void __init sama5_pm_init(void)
1563{
1564	static const int modes[] __initconst = {
1565		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1566	};
1567	static const u32 iomaps[] __initconst = {
1568		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1569		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1570	};
1571	int ret;
1572
1573	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1574		return;
1575
1576	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1577	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1578	ret = at91_dt_ramc(false);
1579	if (ret)
1580		return;
1581
1582	at91_pm_init(NULL);
1583
 1584	/* Quirks apply to ULP0, ULP0 fast and ULP1 modes. */
1585	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1586						 BIT(AT91_PM_ULP0_FAST) |
1587						 BIT(AT91_PM_ULP1);
1588	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1589	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1590						     BIT(AT91_PM_ULP0_FAST);
1591}
1592
1593void __init sama5d2_pm_init(void)
1594{
1595	static const int modes[] __initconst = {
1596		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1597		AT91_PM_BACKUP,
1598	};
1599	static const u32 iomaps[] __initconst = {
1600		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1601		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1602		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC) |
1603					  AT91_PM_IOMAP(ETHC),
1604		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SHDWC) |
1605					  AT91_PM_IOMAP(SFRBU),
1606	};
1607	int ret;
1608
1609	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1610		return;
1611
1612	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1613		pr_warn("AT91: Secure PM: ignoring standby mode\n");
1614		at91_pm_secure_init();
1615		return;
1616	}
1617
1618	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1619	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1620	ret = at91_dt_ramc(false);
1621	if (ret)
1622		return;
1623
1624	at91_pm_init(NULL);
1625
1626	soc_pm.ws_ids = sama5d2_ws_ids;
1627	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1628	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1629
1630	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1631	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1632	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1633	soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1634
1635	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1636	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1637						 BIT(AT91_PM_ULP0_FAST) |
1638						 BIT(AT91_PM_ULP1);
1639	/*
1640	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1641	 * source.
1642	 */
1643	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1644						     BIT(AT91_PM_ULP0_FAST);
1645}
1646
1647void __init sama7_pm_init(void)
1648{
1649	static const int modes[] __initconst = {
1650		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1651	};
1652	static const u32 iomaps[] __initconst = {
1653		[AT91_PM_ULP0]		= AT91_PM_IOMAP(SFRBU),
1654		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SFRBU) |
1655					  AT91_PM_IOMAP(SHDWC) |
1656					  AT91_PM_IOMAP(ETHC),
1657		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SFRBU) |
1658					  AT91_PM_IOMAP(SHDWC),
1659	};
1660	int ret;
1661
1662	if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1663		return;
1664
1665	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1666
1667	ret = at91_dt_ramc(true);
1668	if (ret)
1669		return;
1670
1671	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1672	at91_pm_init(NULL);
1673
1674	soc_pm.ws_ids = sama7g5_ws_ids;
1675	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1676
1677	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1678	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1679	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1680	soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1681
 1682	/* Quirks apply to ULP1 for both Ethernet interfaces. */
1683	soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1684	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1685}
1686
1687static int __init at91_pm_modes_select(char *str)
1688{
1689	char *s;
1690	substring_t args[MAX_OPT_ARGS];
1691	int standby, suspend;
1692
1693	if (!str)
1694		return 0;
1695
1696	s = strsep(&str, ",");
1697	standby = match_token(s, pm_modes, args);
1698	if (standby < 0)
1699		return 0;
1700
1701	suspend = match_token(str, pm_modes, args);
1702	if (suspend < 0)
1703		return 0;
1704
1705	soc_pm.data.standby_mode = standby;
1706	soc_pm.data.suspend_mode = suspend;
1707
1708	return 0;
1709}
1710early_param("atmel.pm_modes", at91_pm_modes_select);
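/*
 * Example kernel command line usage of the early parameter registered above:
 * "atmel.pm_modes=standby,ulp1" selects STANDBY as the standby mode and ULP1
 * as the suspend mode. Both tokens must come from the pm_modes table above
 * ("standby", "ulp0", "ulp0-fast", "ulp1", "backup"); the chosen modes are
 * still validated per SoC by at91_pm_modes_validate() where applicable.
 */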
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * arch/arm/mach-at91/pm.c
   4 * AT91 Power Management
   5 *
   6 * Copyright (C) 2005 David Brownell
   7 */
   8
   9#include <linux/genalloc.h>
  10#include <linux/io.h>
  11#include <linux/of_address.h>
  12#include <linux/of.h>
  13#include <linux/of_fdt.h>
  14#include <linux/of_platform.h>
 
  15#include <linux/parser.h>
  16#include <linux/suspend.h>
  17
  18#include <linux/clk.h>
  19#include <linux/clk/at91_pmc.h>
  20#include <linux/platform_data/atmel.h>
  21
  22#include <asm/cacheflush.h>
  23#include <asm/fncpy.h>
  24#include <asm/system_misc.h>
  25#include <asm/suspend.h>
  26
  27#include "generic.h"
  28#include "pm.h"
  29#include "sam_secure.h"
  30
  31#define BACKUP_DDR_PHY_CALIBRATION	(9)
  32
  33/**
  34 * struct at91_pm_bu - AT91 power management backup unit data structure
  35 * @suspended: true if suspended to backup mode
  36 * @reserved: reserved
  37 * @canary: canary data for memory checking after exit from backup mode
  38 * @resume: resume API
  39 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
  40 * of the memory
  41 */
  42struct at91_pm_bu {
  43	int suspended;
  44	unsigned long reserved;
  45	phys_addr_t canary;
  46	phys_addr_t resume;
  47	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
  48};
  49
  50/**
  51 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
  52 * @pswbu: power switch BU control registers
  53 */
  54struct at91_pm_sfrbu_regs {
  55	struct {
  56		u32 key;
  57		u32 ctrl;
  58		u32 state;
  59		u32 softsw;
  60	} pswbu;
  61};
  62
  63/**
  64 * enum at91_pm_eth_clk - Ethernet clock indexes
  65 * @AT91_PM_ETH_PCLK: pclk index
  66 * @AT91_PM_ETH_HCLK: hclk index
  67 * @AT91_PM_ETH_MAX_CLK: max index
  68 */
  69enum at91_pm_eth_clk {
  70	AT91_PM_ETH_PCLK,
  71	AT91_PM_ETH_HCLK,
  72	AT91_PM_ETH_MAX_CLK,
  73};
  74
  75/**
  76 * enum at91_pm_eth - Ethernet controller indexes
  77 * @AT91_PM_G_ETH: gigabit Ethernet controller index
  78 * @AT91_PM_E_ETH: megabit Ethernet controller index
  79 * @AT91_PM_MAX_ETH: max index
  80 */
  81enum at91_pm_eth {
  82	AT91_PM_G_ETH,
  83	AT91_PM_E_ETH,
  84	AT91_PM_MAX_ETH,
  85};
  86
  87/**
  88 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
  89 * @dev: Ethernet device
  90 * @np: Ethernet device node
  91 * @clks: Ethernet clocks
  92 * @modes: power management mode that this quirk applies to
  93 * @dns_modes: do not suspend modes: stop suspending if Ethernet is configured
  94 *	       as wakeup source but buggy and no other wakeup source is
  95 *	       available
  96 */
  97struct at91_pm_quirk_eth {
  98	struct device *dev;
  99	struct device_node *np;
 100	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
 101	u32 modes;
 102	u32 dns_modes;
 103};
 104
 105/**
 106 * struct at91_pm_quirks - AT91 PM quirks
 107 * @eth: Ethernet quirks
 108 */
 109struct at91_pm_quirks {
 110	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
 111};
 112
 113/**
 114 * struct at91_soc_pm - AT91 SoC power management data structure
 115 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
 116 * @config_pmc_ws: wakeup srouces configuration function for PMC
 117 * @ws_ids: wakup sources of_device_id array
 118 * @bu: backup unit mapped data (for backup mode)
 119 * @quirks: PM quirks
 120 * @data: PM data to be used on last phase of suspend
 121 * @sfrbu_regs: SFRBU registers mapping
 122 * @memcs: memory chip select
 123 */
 124struct at91_soc_pm {
 125	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
 126	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
 127	const struct of_device_id *ws_ids;
 128	struct at91_pm_bu *bu;
 129	struct at91_pm_quirks quirks;
 130	struct at91_pm_data data;
 131	struct at91_pm_sfrbu_regs sfrbu_regs;
 132	void *memcs;
 133};
 134
 135/**
 136 * enum at91_pm_iomaps - IOs that needs to be mapped for different PM modes
 137 * @AT91_PM_IOMAP_SHDWC:	SHDWC controller
 138 * @AT91_PM_IOMAP_SFRBU:	SFRBU controller
 139 * @AT91_PM_IOMAP_ETHC:		Ethernet controller
 140 */
 141enum at91_pm_iomaps {
 142	AT91_PM_IOMAP_SHDWC,
 143	AT91_PM_IOMAP_SFRBU,
 144	AT91_PM_IOMAP_ETHC,
 145};
 146
 147#define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
 148
 149static struct at91_soc_pm soc_pm = {
 150	.data = {
 151		.standby_mode = AT91_PM_STANDBY,
 152		.suspend_mode = AT91_PM_ULP0,
 153	},
 154};
 155
 156static const match_table_t pm_modes __initconst = {
 157	{ AT91_PM_STANDBY,	"standby" },
 158	{ AT91_PM_ULP0,		"ulp0" },
 159	{ AT91_PM_ULP0_FAST,    "ulp0-fast" },
 160	{ AT91_PM_ULP1,		"ulp1" },
 161	{ AT91_PM_BACKUP,	"backup" },
 162	{ -1, NULL },
 163};
 164
 165#define at91_ramc_read(id, field) \
 166	__raw_readl(soc_pm.data.ramc[id] + field)
 167
 168#define at91_ramc_write(id, field, value) \
 169	__raw_writel(value, soc_pm.data.ramc[id] + field)
 170
 171static int at91_pm_valid_state(suspend_state_t state)
 172{
 173	switch (state) {
 174		case PM_SUSPEND_ON:
 175		case PM_SUSPEND_STANDBY:
 176		case PM_SUSPEND_MEM:
 177			return 1;
 178
 179		default:
 180			return 0;
 181	}
 182}
 183
 184static int canary = 0xA5A5A5A5;
 185
 186struct wakeup_source_info {
 187	unsigned int pmc_fsmr_bit;
 188	unsigned int shdwc_mr_bit;
 189	bool set_polarity;
 190};
 191
 192static const struct wakeup_source_info ws_info[] = {
 193	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
 194	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
 195	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
 196	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
 197	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
 198	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
 199};
 200
 201static const struct of_device_id sama5d2_ws_ids[] = {
 202	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
 203	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
 204	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
 205	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
 206	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
 207	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
 208	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
 209	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
 210	{ /* sentinel */ }
 211};
 212
 213static const struct of_device_id sam9x60_ws_ids[] = {
 214	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
 215	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
 216	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
 217	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
 218	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
 219	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
 220	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
 221	{ /* sentinel */ }
 222};
 223
 224static const struct of_device_id sama7g5_ws_ids[] = {
 225	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
 226	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
 227	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
 228	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
 229	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
 230	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
 231	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
 232	{ /* sentinel */ }
 233};
 234
 235static int at91_pm_config_ws(unsigned int pm_mode, bool set)
 236{
 237	const struct wakeup_source_info *wsi;
 238	const struct of_device_id *match;
 239	struct platform_device *pdev;
 240	struct device_node *np;
 241	unsigned int mode = 0, polarity = 0, val = 0;
 242
 243	if (pm_mode != AT91_PM_ULP1)
 244		return 0;
 245
 246	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
 247		return -EPERM;
 248
 249	if (!set) {
 250		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
 251		return 0;
 252	}
 253
 254	if (soc_pm.config_shdwc_ws)
 255		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
 256
 257	/* SHDWC.MR */
 258	val = readl(soc_pm.data.shdwc + 0x04);
 259
 260	/* Loop through defined wakeup sources. */
 261	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
 262		pdev = of_find_device_by_node(np);
 263		if (!pdev)
 264			continue;
 265
 266		if (device_may_wakeup(&pdev->dev)) {
 267			wsi = match->data;
 268
 269			/* Check if enabled on SHDWC. */
 270			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
 271				goto put_device;
 272
 273			mode |= wsi->pmc_fsmr_bit;
 274			if (wsi->set_polarity)
 275				polarity |= wsi->pmc_fsmr_bit;
 276		}
 277
 278put_device:
 279		put_device(&pdev->dev);
 280	}
 281
 282	if (mode) {
 283		if (soc_pm.config_pmc_ws)
 284			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
 285	} else {
  286		pr_err("AT91: PM: no ULP1 wakeup sources found!\n");
 287	}
 288
 289	return mode ? 0 : -EPERM;
 290}
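/*
 * Editor's note, worked example (sketch, based on the sama5d2 tables above):
 * suspending to ULP1 with wakeup enabled on the "atmel,sama5d2-rtc" node and
 * SHDWC.MR bit 17 set ORs AT91_PMC_RTCAL into the FSMR value; a wakeup-enabled
 * "atmel,sama5d2-gem" node also sets the matching FSPR polarity bit, since
 * ws_info[0] has .set_polarity = true. If nothing qualifies, the function
 * returns -EPERM and the suspend attempt is aborted.
 */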
 291
 292static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
 293					u32 *polarity)
 294{
 295	u32 val;
 296
 297	/* SHDWC.WUIR */
 298	val = readl(shdwc + 0x0c);
 299	*mode |= (val & 0x3ff);
 300	*polarity |= ((val >> 16) & 0x3ff);
 301
 302	return 0;
 303}
 304
 305static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
 306{
 307	writel(mode, pmc + AT91_PMC_FSMR);
 308	writel(polarity, pmc + AT91_PMC_FSPR);
 309
 310	return 0;
 311}
 312
 313static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
 314{
 315	writel(mode, pmc + AT91_PMC_FSMR);
 316
 317	return 0;
 318}
 319
 320static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
 321{
 322	struct platform_device *pdev;
 323
 324	/* Interface NA in DT. */
 325	if (!eth->np)
 326		return false;
 327
 328	/* No quirks for this interface and current suspend mode. */
 329	if (!(eth->modes & BIT(soc_pm.data.mode)))
 330		return false;
 331
 332	if (!eth->dev) {
 333		/* Driver not probed. */
 334		pdev = of_find_device_by_node(eth->np);
 335		if (!pdev)
 336			return false;
 337		eth->dev = &pdev->dev;
 338	}
 339
 340	/* No quirks if device isn't a wakeup source. */
 341	if (!device_may_wakeup(eth->dev)) {
 342		put_device(eth->dev);
 343		return false;
 344	}
 345
 346	/* put_device(eth->dev) is called at the end of suspend. */
 347	return true;
 348}
 349
 350static int at91_pm_config_quirks(bool suspend)
 351{
 352	struct at91_pm_quirk_eth *eth;
 353	int i, j, ret, tmp;
 354
 355	/*
  356	 * Ethernet IPs whose device_node pointers are stored in
  357	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1
  358	 * or both, due to a hardware bug. If they receive WoL packets while in
  359	 * ULP0 or ULP1 the IPs could stop working, or the whole system could
  360	 * stop working. We cannot handle this scenario in the Ethernet driver
  361	 * itself as the driver is common to multiple vendors, and only here, in
  362	 * this file, do we know whether we suspend to ULP0 or ULP1 mode. Thus
  363	 * handle these scenarios here, as quirks.
 364	 */
 365	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
 366		eth = &soc_pm.quirks.eth[i];
 367
 368		if (!at91_pm_eth_quirk_is_valid(eth))
 369			continue;
 370
 371		/*
  372		 * For modes in the dns_modes mask the system blocks if the quirk
  373		 * is not applied, but with the quirk applied the interface does
  374		 * not react to WoL events. Thus take care to avoid suspending if
  375		 * this interface is the only configured wakeup source.
 376		 */
 377		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
 378			int ws_count = 0;
 379#ifdef CONFIG_PM_SLEEP
 380			struct wakeup_source *ws;
 381
 382			for_each_wakeup_source(ws) {
 383				if (ws->dev == eth->dev)
 384					continue;
 385
 386				ws_count++;
 387				break;
 388			}
 389#endif
 390
 391			/*
  392			 * Checking !ws_count is valid for all platforms with this
  393			 * issue, even when both G_ETH and E_ETH are available, as
  394			 * dns_modes is populated only for the G_ETH interface.
 395			 */
 396			if (!ws_count) {
  397				pr_err("AT91: PM: Ethernet cannot resume from WoL!\n");
 398				ret = -EPERM;
 399				put_device(eth->dev);
 400				eth->dev = NULL;
 401				/* No need to revert clock settings for this eth. */
 402				i--;
 403				goto clk_unconfigure;
 404			}
 405		}
 406
 407		if (suspend) {
 408			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
 409		} else {
 410			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
 411						      eth->clks);
 412			if (ret)
 413				goto clk_unconfigure;
 414			/*
 415			 * Release the reference to eth->dev taken in
 416			 * at91_pm_eth_quirk_is_valid().
 417			 */
 418			put_device(eth->dev);
 419			eth->dev = NULL;
 420		}
 421	}
 422
 423	return 0;
 424
 425clk_unconfigure:
 426	/*
  427	 * In the resume case we reach this point if clk_bulk_prepare_enable()
  428	 * failed; we don't want to revert the previous successful
  429	 * clk_bulk_prepare_enable() calls for the other IPs.
 430	 */
 431	for (j = i; j >= 0; j--) {
 432		eth = &soc_pm.quirks.eth[j];
 433		if (suspend) {
 434			if (!at91_pm_eth_quirk_is_valid(eth))
 435				continue;
 436
 437			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
 438			if (tmp) {
 439				pr_err("AT91: PM: failed to enable %s clocks\n",
 440				       j == AT91_PM_G_ETH ? "geth" : "eth");
 441			}
 442		} else {
 443			/*
 444			 * Release the reference to eth->dev taken in
 445			 * at91_pm_eth_quirk_is_valid().
 446			 */
 447			put_device(eth->dev);
 448			eth->dev = NULL;
 449		}
 450	}
 451
 452	return ret;
 453}
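/*
 * Editor's note, example flow (sketch): on sama5d2, suspending to ULP0 with
 * the GEM as the only enabled wakeup source is refused with -EPERM because
 * ULP0 is in that interface's dns_modes; on sama7g5 the quirk only covers
 * ULP1, where it gates the MAC pclk/hclk across the suspend and re-enables
 * them on resume.
 */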
 454
 455/*
 456 * Called after processes are frozen, but before we shutdown devices.
 457 */
 458static int at91_pm_begin(suspend_state_t state)
 459{
 460	int ret;
 461
 462	switch (state) {
 463	case PM_SUSPEND_MEM:
 464		soc_pm.data.mode = soc_pm.data.suspend_mode;
 465		break;
 466
 467	case PM_SUSPEND_STANDBY:
 468		soc_pm.data.mode = soc_pm.data.standby_mode;
 469		break;
 470
 471	default:
 472		soc_pm.data.mode = -1;
 473	}
 474
 475	ret = at91_pm_config_ws(soc_pm.data.mode, true);
 476	if (ret)
 477		return ret;
 478
 479	if (soc_pm.data.mode == AT91_PM_BACKUP)
 480		soc_pm.bu->suspended = 1;
 481	else if (soc_pm.bu)
 482		soc_pm.bu->suspended = 0;
 483
 484	return 0;
 485}
 486
 487/*
 488 * Verify that all the clocks are correct before entering
 489 * slow-clock mode.
 490 */
 491static int at91_pm_verify_clocks(void)
 492{
 493	unsigned long scsr;
 494	int i;
 495
 496	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
 497
 498	/* USB must not be using PLLB */
 499	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
 500		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
 501		return 0;
 502	}
 503
 504	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
 505	for (i = 0; i < 4; i++) {
 506		u32 css;
 507
 508		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
 509			continue;
 510		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
 511		if (css != AT91_PMC_CSS_SLOW) {
 512			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
 513			return 0;
 514		}
 515	}
 516
 517	return 1;
 518}
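/*
 * Editor's note (sketch): if, for example, USB host clocks are still enabled
 * in PMC_SCSR, or a programmable clock PCKx is sourced from something other
 * than the slow clock, this check fails and at91_pm_enter() skips the deep
 * suspend for the ULP/backup modes, going through the quirk-revert error path
 * instead.
 */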
 519
 520/*
 521 * Call this from platform driver suspend() to see how deeply to suspend.
 522 * For example, some controllers (like OHCI) need one of the PLL clocks
 523 * in order to act as a wakeup source, and those are not available when
 524 * going into slow clock mode.
 525 *
 526 * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
 527 * the very same problem (but not using at91 main_clk), and it'd be better
 528 * to add one generic API rather than lots of platform-specific ones.
 529 */
 530int at91_suspend_entering_slow_clock(void)
 531{
 532	return (soc_pm.data.mode >= AT91_PM_ULP0);
 533}
 534EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
 535
 536static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
 537extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
 538extern u32 at91_pm_suspend_in_sram_sz;
 539
 540static int at91_suspend_finish(unsigned long val)
 541{
 542	unsigned char modified_gray_code[] = {
 543		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
 544		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
 545		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
 546		0x10, 0x11,
 547	};
 548	unsigned int tmp, index;
 549	int i;
 550
 551	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
 552		/*
 553		 * Bootloader will perform DDR recalibration and will try to
 554		 * restore the ZQ0SR0 with the value saved here. But the
 555		 * calibration is buggy and restoring some values from ZQ0SR0
 556		 * is forbidden and risky thus we need to provide processed
 557		 * values for these (modified gray code values).
 558		 */
 559		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
 560
 561		/* Store pull-down output impedance select. */
 562		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
 563		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
 564
 565		/* Store pull-up output impedance select. */
 566		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
 567		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
 568
 569		/* Store pull-down on-die termination impedance select. */
 570		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
 571		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
 572
 573		/* Store pull-up on-die termination impedance select. */
 574		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
 575		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
 576
 577		/*
 578		 * The 1st 8 words of memory might get corrupted in the process
 579		 * of DDR PHY recalibration; it is saved here in securam and it
 580		 * will be restored later, after recalibration, by bootloader
 581		 */
 582		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
 583			soc_pm.bu->ddr_phy_calibration[i] =
 584				*((unsigned int *)soc_pm.memcs + (i - 1));
 585	}
 586
 587	flush_cache_all();
 588	outer_disable();
 589
 590	at91_suspend_sram_fn(&soc_pm.data);
 591
 592	return 0;
 593}
 594
 595static void at91_pm_switch_ba_to_vbat(void)
 596{
 597	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
 598	unsigned int val;
 599
 600	/* Just for safety. */
 601	if (!soc_pm.data.sfrbu)
 602		return;
 603
 604	val = readl(soc_pm.data.sfrbu + offset);
 605
 606	/* Already on VBAT. */
 607	if (!(val & soc_pm.sfrbu_regs.pswbu.state))
 608		return;
 609
 610	val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
 611	val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
 612	writel(val, soc_pm.data.sfrbu + offset);
 613
 614	/* Wait for update. */
 615	val = readl(soc_pm.data.sfrbu + offset);
 616	while (val & soc_pm.sfrbu_regs.pswbu.state)
 617		val = readl(soc_pm.data.sfrbu + offset);
 618}
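/*
 * Editor's note, worked example (sketch): with the sama5d2 values set below,
 * the write above clears BIT(1) (softsw) and sets the key (0x4BD20C << 8) and
 * ctrl (BIT(0)) bits in the value read back from PSWBU; the loop then polls
 * until BIT(3) (state) clears, i.e. until the backup area is supplied from
 * VBAT.
 */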
 619
 620static void at91_pm_suspend(suspend_state_t state)
 621{
 622	if (soc_pm.data.mode == AT91_PM_BACKUP) {
 623		at91_pm_switch_ba_to_vbat();
 624
 625		cpu_suspend(0, at91_suspend_finish);
 626
 627		/* The SRAM is lost between suspend cycles */
 628		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
 629					     &at91_pm_suspend_in_sram,
 630					     at91_pm_suspend_in_sram_sz);
 631	} else {
 632		at91_suspend_finish(0);
 633	}
 634
 635	outer_resume();
 636}
 637
 638/*
 639 * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
 640 * event sources; and reduces DRAM power.  But otherwise it's identical to
 641 * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
 642 *
  643 * AT91_PM_ULP0 is like STANDBY plus slow-clock mode, so drivers must
  644 * suspend more deeply: the master clock switches to clk32k and the main
  645 * oscillator is turned off.
  646 *
  647 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
 648 */
 649static int at91_pm_enter(suspend_state_t state)
 650{
 651	int ret;
 652
 653	ret = at91_pm_config_quirks(true);
 654	if (ret)
 655		return ret;
 656
 657	switch (state) {
 658	case PM_SUSPEND_MEM:
 659	case PM_SUSPEND_STANDBY:
 660		/*
 661		 * Ensure that clocks are in a valid state.
 662		 */
 663		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
 664		    !at91_pm_verify_clocks())
 665			goto error;
 666
 667		at91_pm_suspend(state);
 668
 669		break;
 670
 671	case PM_SUSPEND_ON:
 672		cpu_do_idle();
 673		break;
 674
 675	default:
 676		pr_debug("AT91: PM - bogus suspend state %d\n", state);
 677		goto error;
 678	}
 679
 680error:
 681	at91_pm_config_quirks(false);
 682	return 0;
 683}
 684
 685/*
 686 * Called right prior to thawing processes.
 687 */
 688static void at91_pm_end(void)
 689{
 690	at91_pm_config_ws(soc_pm.data.mode, false);
 691}
 692
 693
 694static const struct platform_suspend_ops at91_pm_ops = {
 695	.valid	= at91_pm_valid_state,
 696	.begin	= at91_pm_begin,
 697	.enter	= at91_pm_enter,
 698	.end	= at91_pm_end,
 699};
 700
 701static struct platform_device at91_cpuidle_device = {
 702	.name = "cpuidle-at91",
 703};
 704
 705/*
 706 * The AT91RM9200 goes into self-refresh mode with this command, and will
 707 * terminate self-refresh automatically on the next SDRAM access.
 708 *
 709 * Self-refresh mode is exited as soon as a memory access is made, but we don't
 710 * know for sure when that happens. However, we need to restore the low-power
 711 * mode if it was enabled before going idle. Restoring low-power mode while
 712 * still in self-refresh is "not recommended", but seems to work.
 713 */
 714static void at91rm9200_standby(void)
 715{
 716	asm volatile(
  717		"b    1f\n\t"				/* Skip the .align padding below. */
  718		".align    5\n\t"			/* Keep the sequence in one cache line. */
  719		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"	/* CP15: drain the write buffer. */
  720		"    str    %2, [%1, %3]\n\t"		/* SDRAMC_SRR <- 1: self-refresh request. */
  721		"    mcr    p15, 0, %0, c7, c0, 4\n\t"	/* CP15: wait for interrupt. */
 722		:
 723		: "r" (0), "r" (soc_pm.data.ramc[0]),
 724		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
 725}
 726
  727/* We manage both DDRAM/SDRAM controllers, so we need more than one value
  728 * to remember.
 729 */
 730static void at91_ddr_standby(void)
 731{
 732	/* Those two values allow us to delay self-refresh activation
 733	 * to the maximum. */
 734	u32 lpr0, lpr1 = 0;
 735	u32 mdr, saved_mdr0, saved_mdr1 = 0;
 736	u32 saved_lpr0, saved_lpr1 = 0;
 737
 738	/* LPDDR1 --> force DDR2 mode during self-refresh */
 739	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
 740	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
 741		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
 742		mdr |= AT91_DDRSDRC_MD_DDR2;
 743		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
 744	}
 745
 746	if (soc_pm.data.ramc[1]) {
 747		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
 748		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
 749		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
 750		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
 751		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
 752			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
 753			mdr |= AT91_DDRSDRC_MD_DDR2;
 754			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
 755		}
 756	}
 757
 758	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
 759	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
 760	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
 761
 762	/* self-refresh mode now */
 763	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
 764	if (soc_pm.data.ramc[1])
 765		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
 766
 767	cpu_do_idle();
 768
 769	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
 770	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
 771	if (soc_pm.data.ramc[1]) {
  772		at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
 773		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 774	}
 775}
 776
 777static void sama5d3_ddr_standby(void)
 778{
 779	u32 lpr0;
 780	u32 saved_lpr0;
 781
 782	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
 783	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
 784	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
 785
 786	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
 787
 788	cpu_do_idle();
 789
 790	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
 791}
 792
  793/* We manage both DDRAM/SDRAM controllers, so we need more than one value
  794 * to remember.
 795 */
 796static void at91sam9_sdram_standby(void)
 797{
 798	u32 lpr0, lpr1 = 0;
 799	u32 saved_lpr0, saved_lpr1 = 0;
 800
 801	if (soc_pm.data.ramc[1]) {
 802		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
 803		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
 804		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
 805	}
 806
 807	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
 808	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
 809	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
 810
 811	/* self-refresh mode now */
 812	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
 813	if (soc_pm.data.ramc[1])
 814		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
 815
 816	cpu_do_idle();
 817
 818	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
 819	if (soc_pm.data.ramc[1])
 820		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 821}
 822
 823static void sama7g5_standby(void)
 824{
 825	int pwrtmg, ratio;
 826
 827	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
 828	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
 829
 830	/*
  831	 * Place the RAM into self-refresh after a maximum number of idle clock
  832	 * cycles. The maximum is configured by the bootloader in
  833	 * UDDRC_PWRMGT.SELFREF_TO_X32.
 834	 */
 835	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
 836	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
 837	/* Divide CPU clock by 16. */
 838	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
 839
 840	cpu_do_idle();
 841
 842	/* Restore previous configuration. */
 843	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
 844	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
 845}
 846
 847struct ramc_info {
 848	void (*idle)(void);
 849	unsigned int memctrl;
 850};
 851
 852static const struct ramc_info ramc_infos[] __initconst = {
 853	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
 854	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
 855	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
 856	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
 857	{ .idle = sama7g5_standby, },
 858};
 859
 860static const struct of_device_id ramc_ids[] __initconst = {
 861	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
 862	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
 863	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
 864	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
 865	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
 866	{ /*sentinel*/ }
 867};
 868
 869static const struct of_device_id ramc_phy_ids[] __initconst = {
 870	{ .compatible = "microchip,sama7g5-ddr3phy", },
 871	{ /* Sentinel. */ },
 872};
 873
 874static __init int at91_dt_ramc(bool phy_mandatory)
 875{
 876	struct device_node *np;
 877	const struct of_device_id *of_id;
 878	int idx = 0;
 879	void *standby = NULL;
 880	const struct ramc_info *ramc;
 881	int ret;
 882
 883	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
 884		soc_pm.data.ramc[idx] = of_iomap(np, 0);
 885		if (!soc_pm.data.ramc[idx]) {
 886			pr_err("unable to map ramc[%d] cpu registers\n", idx);
 887			ret = -ENOMEM;
 888			of_node_put(np);
 889			goto unmap_ramc;
 890		}
 891
 892		ramc = of_id->data;
 893		if (ramc) {
 894			if (!standby)
 895				standby = ramc->idle;
 896			soc_pm.data.memctrl = ramc->memctrl;
 897		}
 898
 899		idx++;
 900	}
 901
 902	if (!idx) {
 903		pr_err("unable to find compatible ram controller node in dtb\n");
 904		ret = -ENODEV;
 905		goto unmap_ramc;
 906	}
 907
 908	/* Lookup for DDR PHY node, if any. */
 909	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
 910		soc_pm.data.ramc_phy = of_iomap(np, 0);
 911		if (!soc_pm.data.ramc_phy) {
 912			pr_err("unable to map ramc phy cpu registers\n");
 913			ret = -ENOMEM;
 914			of_node_put(np);
 915			goto unmap_ramc;
 916		}
 917	}
 918
 919	if (phy_mandatory && !soc_pm.data.ramc_phy) {
 920		pr_err("DDR PHY is mandatory!\n");
 921		ret = -ENODEV;
 922		goto unmap_ramc;
 923	}
 924
 925	if (!standby) {
  926		pr_warn("ramc: no standby function available\n");
 927		return 0;
 928	}
 929
 930	at91_cpuidle_device.dev.platform_data = standby;
 931
 932	return 0;
 933
 934unmap_ramc:
 935	while (idx)
 936		iounmap(soc_pm.data.ramc[--idx]);
 937
 938	return ret;
 939}
 940
 941static void at91rm9200_idle(void)
 942{
 943	/*
 944	 * Disable the processor clock.  The processor will be automatically
 945	 * re-enabled by an interrupt or by a reset.
 946	 */
 947	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
 948}
 949
 950static void at91sam9_idle(void)
 951{
 952	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
 953	cpu_do_idle();
 954}
 955
 956static void __init at91_pm_sram_init(void)
 957{
 958	struct gen_pool *sram_pool;
 959	phys_addr_t sram_pbase;
 960	unsigned long sram_base;
 961	struct device_node *node;
 962	struct platform_device *pdev = NULL;
 963
 964	for_each_compatible_node(node, NULL, "mmio-sram") {
 965		pdev = of_find_device_by_node(node);
 966		if (pdev) {
 967			of_node_put(node);
 968			break;
 969		}
 970	}
 971
 972	if (!pdev) {
 973		pr_warn("%s: failed to find sram device!\n", __func__);
 974		return;
 975	}
 976
 977	sram_pool = gen_pool_get(&pdev->dev, NULL);
 978	if (!sram_pool) {
 979		pr_warn("%s: sram pool unavailable!\n", __func__);
 980		goto out_put_device;
 981	}
 982
 983	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
 984	if (!sram_base) {
 985		pr_warn("%s: unable to alloc sram!\n", __func__);
 986		goto out_put_device;
 987	}
 988
 989	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
 990	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
 991					at91_pm_suspend_in_sram_sz, false);
 992	if (!at91_suspend_sram_fn) {
 993		pr_warn("SRAM: Could not map\n");
 994		goto out_put_device;
 995	}
 996
 997	/* Copy the pm suspend handler to SRAM */
 998	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
 999			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
1000	return;
1001
1002out_put_device:
1003	put_device(&pdev->dev);
1004	return;
1005}
1006
1007static bool __init at91_is_pm_mode_active(int pm_mode)
1008{
1009	return (soc_pm.data.standby_mode == pm_mode ||
1010		soc_pm.data.suspend_mode == pm_mode);
1011}
1012
1013static int __init at91_pm_backup_scan_memcs(unsigned long node,
1014					    const char *uname, int depth,
1015					    void *data)
1016{
1017	const char *type;
1018	const __be32 *reg;
1019	int *located = data;
1020	int size;
1021
1022	/* Memory node already located. */
1023	if (*located)
1024		return 0;
1025
1026	type = of_get_flat_dt_prop(node, "device_type", NULL);
1027
1028	/* We are scanning "memory" nodes only. */
1029	if (!type || strcmp(type, "memory"))
1030		return 0;
1031
1032	reg = of_get_flat_dt_prop(node, "reg", &size);
1033	if (reg) {
1034		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1035		*located = 1;
1036	}
1037
1038	return 0;
1039}
1040
1041static int __init at91_pm_backup_init(void)
1042{
1043	struct gen_pool *sram_pool;
1044	struct device_node *np;
1045	struct platform_device *pdev;
1046	int ret = -ENODEV, located = 0;
1047
1048	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1049	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
1050		return -EPERM;
1051
1052	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1053		return 0;
1054
1055	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1056	if (!np)
1057		return ret;
1058
1059	pdev = of_find_device_by_node(np);
1060	of_node_put(np);
1061	if (!pdev) {
1062		pr_warn("%s: failed to find securam device!\n", __func__);
1063		return ret;
1064	}
1065
1066	sram_pool = gen_pool_get(&pdev->dev, NULL);
1067	if (!sram_pool) {
1068		pr_warn("%s: securam pool unavailable!\n", __func__);
1069		goto securam_fail;
1070	}
1071
1072	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1073	if (!soc_pm.bu) {
1074		pr_warn("%s: unable to alloc securam!\n", __func__);
1075		ret = -ENOMEM;
1076		goto securam_fail;
1077	}
1078
1079	soc_pm.bu->suspended = 0;
1080	soc_pm.bu->canary = __pa_symbol(&canary);
1081	soc_pm.bu->resume = __pa_symbol(cpu_resume);
1082	if (soc_pm.data.ramc_phy) {
1083		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1084		if (!located)
1085			goto securam_fail;
1086	}
1087
1088	return 0;
1089
1090securam_fail:
1091	put_device(&pdev->dev);
1092	return ret;
1093}
1094
1095static void __init at91_pm_secure_init(void)
1096{
1097	int suspend_mode;
1098	struct arm_smccc_res res;
1099
1100	suspend_mode = soc_pm.data.suspend_mode;
1101
1102	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1103			     suspend_mode, 0);
1104	if (res.a0 == 0) {
1105		pr_info("AT91: Secure PM: suspend mode set to %s\n",
1106			pm_modes[suspend_mode].pattern);
1107		return;
1108	}
1109
1110	pr_warn("AT91: Secure PM: %s mode not supported !\n",
1111		pm_modes[suspend_mode].pattern);
1112
1113	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1114	if (res.a0 == 0) {
1115		pr_warn("AT91: Secure PM: failed to get default mode\n");
1116		return;
1117	}
1118
1119	pr_info("AT91: Secure PM: using default suspend mode %s\n",
1120		pm_modes[suspend_mode].pattern);
1121
1122	soc_pm.data.suspend_mode = res.a1;
1123}
1124static const struct of_device_id atmel_shdwc_ids[] = {
1125	{ .compatible = "atmel,sama5d2-shdwc" },
1126	{ .compatible = "microchip,sam9x60-shdwc" },
1127	{ .compatible = "microchip,sama7g5-shdwc" },
1128	{ /* sentinel. */ }
1129};
1130
1131static const struct of_device_id gmac_ids[] __initconst = {
1132	{ .compatible = "atmel,sama5d3-gem" },
1133	{ .compatible = "atmel,sama5d2-gem" },
1134	{ .compatible = "atmel,sama5d29-gem" },
1135	{ .compatible = "microchip,sama7g5-gem" },
1136	{ },
1137};
1138
1139static const struct of_device_id emac_ids[] __initconst = {
1140	{ .compatible = "atmel,sama5d3-macb" },
1141	{ .compatible = "microchip,sama7g5-emac" },
1142	{ },
1143};
1144
1145/*
1146 * Replaces _mode_to_replace with a supported mode that doesn't depend
 1147 * on the controller pointed to by _map_bitmask
 1148 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
 1149 * PM mode
 1150 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
 1151 * the controller represented by _map_bitmask, _mode_to_replace needs to
 1152 * be updated
 1153 * @_mode_to_replace: standby_mode or suspend_mode that needs to be
 1154 * updated
 1155 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
 1156 * to avoid having standby_mode and suspend_mode set to the same AT91
1157 * PM mode
1158 */
1159#define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
1160			     _mode_to_check)				\
1161	do {								\
1162		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
1163			int _mode_to_use, _mode_complementary;		\
1164			/* Use ULP0 if it doesn't need _map_bitmask. */	\
1165			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1166				_mode_to_use = AT91_PM_ULP0;		\
1167				_mode_complementary = AT91_PM_STANDBY;	\
1168			} else {					\
1169				_mode_to_use = AT91_PM_STANDBY;		\
1170				_mode_complementary = AT91_PM_STANDBY;	\
1171			}						\
1172									\
1173			if ((_mode_to_check) != _mode_to_use)		\
1174				(_mode_to_replace) = _mode_to_use;	\
1175			else						\
1176				(_mode_to_replace) = _mode_complementary;\
1177		}							\
1178	} while (0)
1179
1180/*
1181 * Replaces standby and suspend modes with default supported modes:
1182 * ULP0 and STANDBY.
1183 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1184 * flags
 1185 * @_map: controller-specific name; the standby and suspend modes are
 1186 * replaced so that they do not depend on this controller
1187 */
1188#define AT91_PM_REPLACE_MODES(_maps, _map)				\
1189	do {								\
1190		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1191				     (soc_pm.data.standby_mode),	\
1192				     (soc_pm.data.suspend_mode));	\
1193		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1194				     (soc_pm.data.suspend_mode),	\
1195				     (soc_pm.data.standby_mode));	\
1196	} while (0)
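/*
 * Editor's note, worked example (sketch): with the sama5d2 iomaps below, ULP1
 * maps SHDWC. If the SHDWC node cannot be found while suspend_mode is ULP1,
 * AT91_PM_REPLACE_MODES(maps, SHDWC) rewrites suspend_mode to ULP0 (ULP0 does
 * not map SHDWC there); if standby_mode is already ULP0, it falls back to
 * STANDBY so the two modes do not end up identical.
 */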
1197
1198static int __init at91_pm_get_eth_clks(struct device_node *np,
1199				       struct clk_bulk_data *clks)
1200{
1201	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1202	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1203		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1204
1205	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1206	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1207		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1208
1209	return 0;
1210}
1211
1212static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1213{
1214	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1215	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1216}
1217
1218static void __init at91_pm_modes_init(const u32 *maps, int len)
1219{
1220	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1221	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1222	struct device_node *np;
1223	int ret;
1224
1225	ret = at91_pm_backup_init();
1226	if (ret) {
1227		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1228			soc_pm.data.standby_mode = AT91_PM_ULP0;
1229		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1230			soc_pm.data.suspend_mode = AT91_PM_ULP0;
1231	}
1232
1233	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1234	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1235		np = of_find_matching_node(NULL, atmel_shdwc_ids);
1236		if (!np) {
1237			pr_warn("%s: failed to find shdwc!\n", __func__);
1238			AT91_PM_REPLACE_MODES(maps, SHDWC);
1239		} else {
1240			soc_pm.data.shdwc = of_iomap(np, 0);
1241			of_node_put(np);
1242		}
1243	}
1244
1245	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1246	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1247		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1248		if (!np) {
1249			pr_warn("%s: failed to find sfrbu!\n", __func__);
1250			AT91_PM_REPLACE_MODES(maps, SFRBU);
1251		} else {
1252			soc_pm.data.sfrbu = of_iomap(np, 0);
1253			of_node_put(np);
1254		}
1255	}
1256
1257	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1258	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
1259	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1260	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1261	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1262		np = of_find_matching_node(NULL, gmac_ids);
1263		if (!np) {
1264			np = of_find_matching_node(NULL, emac_ids);
1265			if (np)
1266				goto get_emac_clks;
1267			AT91_PM_REPLACE_MODES(maps, ETHC);
1268			goto unmap_unused_nodes;
1269		} else {
1270			gmac->np = np;
1271			at91_pm_get_eth_clks(np, gmac->clks);
1272		}
1273
1274		np = of_find_matching_node(NULL, emac_ids);
1275		if (!np) {
1276			if (at91_pm_eth_clks_empty(gmac->clks))
1277				AT91_PM_REPLACE_MODES(maps, ETHC);
1278		} else {
1279get_emac_clks:
1280			emac->np = np;
1281			ret = at91_pm_get_eth_clks(np, emac->clks);
1282			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1283				of_node_put(gmac->np);
1284				of_node_put(emac->np);
1285				gmac->np = NULL;
1286				emac->np = NULL;
1287			}
1288		}
1289	}
1290
1291unmap_unused_nodes:
 1292	/* Unmap controllers that are not needed by the selected modes. */
1293	if (soc_pm.data.shdwc &&
1294	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1295	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1296		iounmap(soc_pm.data.shdwc);
1297		soc_pm.data.shdwc = NULL;
1298	}
1299
1300	if (soc_pm.data.sfrbu &&
1301	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1302	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1303		iounmap(soc_pm.data.sfrbu);
1304		soc_pm.data.sfrbu = NULL;
1305	}
1306
1307	return;
1308}
1309
1310struct pmc_info {
1311	unsigned long uhp_udp_mask;
1312	unsigned long mckr;
1313	unsigned long version;
1314};
1315
1316static const struct pmc_info pmc_infos[] __initconst = {
1317	{
1318		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1319		.mckr = 0x30,
1320		.version = AT91_PMC_V1,
1321	},
1322
1323	{
1324		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1325		.mckr = 0x30,
1326		.version = AT91_PMC_V1,
1327	},
1328	{
1329		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
1330		.mckr = 0x30,
1331		.version = AT91_PMC_V1,
1332	},
1333	{	.uhp_udp_mask = 0,
1334		.mckr = 0x30,
1335		.version = AT91_PMC_V1,
1336	},
1337	{
1338		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1339		.mckr = 0x28,
1340		.version = AT91_PMC_V2,
1341	},
1342	{
1343		.mckr = 0x28,
1344		.version = AT91_PMC_V2,
1345	},
1346
1347};
1348
1349static const struct of_device_id atmel_pmc_ids[] __initconst = {
1350	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1351	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1352	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1353	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1354	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1355	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1356	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1357	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1358	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1359	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1360	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1361	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1362	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1363	{ /* sentinel */ },
1364};
1365
1366static void __init at91_pm_modes_validate(const int *modes, int len)
1367{
1368	u8 i, standby = 0, suspend = 0;
1369	int mode;
1370
1371	for (i = 0; i < len; i++) {
1372		if (standby && suspend)
1373			break;
1374
1375		if (modes[i] == soc_pm.data.standby_mode && !standby) {
1376			standby = 1;
1377			continue;
1378		}
1379
1380		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1381			suspend = 1;
1382			continue;
1383		}
1384	}
1385
1386	if (!standby) {
1387		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1388			mode = AT91_PM_ULP0;
1389		else
1390			mode = AT91_PM_STANDBY;
1391
1392		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1393			pm_modes[soc_pm.data.standby_mode].pattern,
1394			pm_modes[mode].pattern);
1395		soc_pm.data.standby_mode = mode;
1396	}
1397
1398	if (!suspend) {
1399		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1400			mode = AT91_PM_STANDBY;
1401		else
1402			mode = AT91_PM_ULP0;
1403
1404		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1405			pm_modes[soc_pm.data.suspend_mode].pattern,
1406			pm_modes[mode].pattern);
1407		soc_pm.data.suspend_mode = mode;
1408	}
1409}
1410
1411static void __init at91_pm_init(void (*pm_idle)(void))
1412{
1413	struct device_node *pmc_np;
1414	const struct of_device_id *of_id;
1415	const struct pmc_info *pmc;
1416
1417	if (at91_cpuidle_device.dev.platform_data)
1418		platform_device_register(&at91_cpuidle_device);
1419
1420	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1421	soc_pm.data.pmc = of_iomap(pmc_np, 0);
1422	of_node_put(pmc_np);
1423	if (!soc_pm.data.pmc) {
1424		pr_err("AT91: PM not supported, PMC not found\n");
1425		return;
1426	}
1427
1428	pmc = of_id->data;
1429	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1430	soc_pm.data.pmc_mckr_offset = pmc->mckr;
1431	soc_pm.data.pmc_version = pmc->version;
1432
1433	if (pm_idle)
1434		arm_pm_idle = pm_idle;
1435
1436	at91_pm_sram_init();
1437
1438	if (at91_suspend_sram_fn) {
1439		suspend_set_ops(&at91_pm_ops);
1440		pr_info("AT91: PM: standby: %s, suspend: %s\n",
1441			pm_modes[soc_pm.data.standby_mode].pattern,
1442			pm_modes[soc_pm.data.suspend_mode].pattern);
1443	} else {
1444		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1445	}
1446}
1447
1448void __init at91rm9200_pm_init(void)
1449{
1450	int ret;
1451
1452	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1453		return;
1454
1455	/*
1456	 * Force STANDBY and ULP0 mode to avoid calling
1457	 * at91_pm_modes_validate() which may increase booting time.
1458	 * Platform supports anyway only STANDBY and ULP0 modes.
1459	 */
1460	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1461	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1462
1463	ret = at91_dt_ramc(false);
1464	if (ret)
1465		return;
1466
1467	/*
1468	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1469	 */
1470	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1471
1472	at91_pm_init(at91rm9200_idle);
1473}
1474
1475void __init sam9x60_pm_init(void)
1476{
1477	static const int modes[] __initconst = {
1478		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1479	};
1480	static const int iomaps[] __initconst = {
1481		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC),
1482	};
1483	int ret;
1484
1485	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1486		return;
1487
1488	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1489	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1490	ret = at91_dt_ramc(false);
1491	if (ret)
1492		return;
1493
1494	at91_pm_init(NULL);
1495
1496	soc_pm.ws_ids = sam9x60_ws_ids;
1497	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1498}
1499
1500void __init at91sam9_pm_init(void)
1501{
1502	int ret;
1503
1504	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1505		return;
1506
1507	/*
1508	 * Force STANDBY and ULP0 mode to avoid calling
1509	 * at91_pm_modes_validate() which may increase booting time.
1510	 * Platform supports anyway only STANDBY and ULP0 modes.
1511	 */
1512	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1513	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1514
1515	ret = at91_dt_ramc(false);
1516	if (ret)
1517		return;
1518
1519	at91_pm_init(at91sam9_idle);
1520}
1521
1522void __init sama5_pm_init(void)
1523{
1524	static const int modes[] __initconst = {
1525		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1526	};
1527	static const u32 iomaps[] __initconst = {
1528		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1529		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1530	};
1531	int ret;
1532
1533	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1534		return;
1535
1536	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1537	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1538	ret = at91_dt_ramc(false);
1539	if (ret)
1540		return;
1541
1542	at91_pm_init(NULL);
1543
 1544	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1545	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1546						 BIT(AT91_PM_ULP0_FAST) |
1547						 BIT(AT91_PM_ULP1);
1548	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1549	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1550						     BIT(AT91_PM_ULP0_FAST);
1551}
1552
1553void __init sama5d2_pm_init(void)
1554{
1555	static const int modes[] __initconst = {
1556		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1557		AT91_PM_BACKUP,
1558	};
1559	static const u32 iomaps[] __initconst = {
1560		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1561		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1562		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC) |
1563					  AT91_PM_IOMAP(ETHC),
1564		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SHDWC) |
1565					  AT91_PM_IOMAP(SFRBU),
1566	};
1567	int ret;
1568
1569	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1570		return;
1571
1572	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1573		pr_warn("AT91: Secure PM: ignoring standby mode\n");
1574		at91_pm_secure_init();
1575		return;
1576	}
1577
1578	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1579	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1580	ret = at91_dt_ramc(false);
1581	if (ret)
1582		return;
1583
1584	at91_pm_init(NULL);
1585
1586	soc_pm.ws_ids = sama5d2_ws_ids;
1587	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1588	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1589
1590	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1591	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1592	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1593	soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1594
1595	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1596	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1597						 BIT(AT91_PM_ULP0_FAST) |
1598						 BIT(AT91_PM_ULP1);
1599	/*
1600	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1601	 * source.
1602	 */
1603	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1604						     BIT(AT91_PM_ULP0_FAST);
1605}
1606
1607void __init sama7_pm_init(void)
1608{
1609	static const int modes[] __initconst = {
1610		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1611	};
1612	static const u32 iomaps[] __initconst = {
1613		[AT91_PM_ULP0]		= AT91_PM_IOMAP(SFRBU),
1614		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SFRBU) |
1615					  AT91_PM_IOMAP(SHDWC) |
1616					  AT91_PM_IOMAP(ETHC),
1617		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SFRBU) |
1618					  AT91_PM_IOMAP(SHDWC),
1619	};
1620	int ret;
1621
1622	if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1623		return;
1624
1625	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1626
1627	ret = at91_dt_ramc(true);
1628	if (ret)
1629		return;
1630
1631	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1632	at91_pm_init(NULL);
1633
1634	soc_pm.ws_ids = sama7g5_ws_ids;
1635	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1636
1637	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1638	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1639	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1640	soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1641
 1642	/* Quirks apply to ULP1 for both Ethernet interfaces. */
1643	soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1644	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1645}
1646
1647static int __init at91_pm_modes_select(char *str)
1648{
1649	char *s;
1650	substring_t args[MAX_OPT_ARGS];
1651	int standby, suspend;
1652
1653	if (!str)
1654		return 0;
1655
1656	s = strsep(&str, ",");
1657	standby = match_token(s, pm_modes, args);
1658	if (standby < 0)
1659		return 0;
1660
1661	suspend = match_token(str, pm_modes, args);
1662	if (suspend < 0)
1663		return 0;
1664
1665	soc_pm.data.standby_mode = standby;
1666	soc_pm.data.suspend_mode = suspend;
1667
1668	return 0;
1669}
1670early_param("atmel.pm_modes", at91_pm_modes_select);
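/*
 * Editor's note, usage example (sketch): the standby and suspend modes can be
 * chosen from the kernel command line, first token for standby, second for
 * suspend, e.g.:
 *
 *	atmel.pm_modes=standby,ulp1
 *
 * Unrecognized tokens leave the defaults (standby/ulp0) in place, and the
 * per-SoC init code may still validate or override the selection.
 */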