   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * EMIF driver
   4 *
   5 * Copyright (C) 2012 Texas Instruments, Inc.
   6 *
   7 * Aneesh V <aneesh@ti.com>
   8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 */
  10#include <linux/err.h>
  11#include <linux/kernel.h>
  12#include <linux/reboot.h>
  13#include <linux/platform_data/emif_plat.h>
  14#include <linux/io.h>
  15#include <linux/device.h>
  16#include <linux/platform_device.h>
  17#include <linux/interrupt.h>
  18#include <linux/slab.h>
  19#include <linux/of.h>
  20#include <linux/debugfs.h>
  21#include <linux/seq_file.h>
  22#include <linux/module.h>
  23#include <linux/list.h>
  24#include <linux/spinlock.h>
  25#include <linux/pm.h>
  26
  27#include "emif.h"
  28#include "jedec_ddr.h"
  29#include "of_memory.h"
  30
  31/**
  32 * struct emif_data - Per device static data for driver's use
  33 * @duplicate:			Whether the DDR devices attached to this EMIF
   34 *				instance are exactly the same as those on EMIF1.
   35 *				In this case we can save some memory and processing.
  36 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
  37 *				to this EMIF - read from MR4 register. If there
  38 *				are two devices attached to this EMIF, this
  39 *				value is the maximum of the two temperature
  40 *				levels.
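 * @lpmode:			low power mode (REG_LP_MODE) currently
 *				programmed in the power management control
 *				register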
  41 * @node:			node in the device list
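 * @irq_state:			saved IRQ flags for use with emif_lock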
  42 * @base:			base address of memory-mapped IO registers.
  43 * @dev:			device pointer.
  44 * @regs_cache:			An array of 'struct emif_regs' that stores
  45 *				calculated register values for different
  46 *				frequencies, to avoid re-calculating them on
  47 *				each DVFS transition.
  48 * @curr_regs:			The set of register values used in the last
  49 *				frequency change (i.e. corresponding to the
  50 *				frequency in effect at the moment)
  51 * @plat_data:			Pointer to saved platform data.
  52 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
  53 * @np_ddr:			Pointer to ddr device tree node
  54 */
  55struct emif_data {
  56	u8				duplicate;
  57	u8				temperature_level;
  58	u8				lpmode;
  59	struct list_head		node;
  60	unsigned long			irq_state;
  61	void __iomem			*base;
  62	struct device			*dev;
  63	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
  64	struct emif_regs		*curr_regs;
  65	struct emif_platform_data	*plat_data;
  66	struct dentry			*debugfs_root;
  67	struct device_node		*np_ddr;
  68};
  69
  70static struct emif_data *emif1;
  71static DEFINE_SPINLOCK(emif_lock);
  72static unsigned long	irq_state;
  73static LIST_HEAD(device_list);
  74
  75#ifdef CONFIG_DEBUG_FS
  76static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
  77	struct emif_regs *regs)
  78{
  79	u32 type = emif->plat_data->device_info->type;
  80	u32 ip_rev = emif->plat_data->ip_rev;
  81
  82	seq_printf(s, "EMIF register cache dump for %dMHz\n",
  83		regs->freq/1000000);
  84
  85	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
  86	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
  87	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
  88	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);
  89
  90	if (ip_rev == EMIF_4D) {
  91		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
  92			regs->read_idle_ctrl_shdw_normal);
  93		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
  94			regs->read_idle_ctrl_shdw_volt_ramp);
  95	} else if (ip_rev == EMIF_4D5) {
  96		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
  97			regs->dll_calib_ctrl_shdw_normal);
  98		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
  99			regs->dll_calib_ctrl_shdw_volt_ramp);
 100	}
 101
 102	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
 103		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
 104			regs->ref_ctrl_shdw_derated);
 105		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
 106			regs->sdram_tim1_shdw_derated);
 107		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
 108			regs->sdram_tim3_shdw_derated);
 109	}
 110}
 111
 112static int emif_regdump_show(struct seq_file *s, void *unused)
 113{
 114	struct emif_data	*emif	= s->private;
 115	struct emif_regs	**regs_cache;
 116	int			i;
 117
 118	if (emif->duplicate)
 119		regs_cache = emif1->regs_cache;
 120	else
 121		regs_cache = emif->regs_cache;
 122
 123	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
 124		do_emif_regdump_show(s, emif, regs_cache[i]);
 125		seq_putc(s, '\n');
 126	}
 127
 128	return 0;
 129}
 130
 131DEFINE_SHOW_ATTRIBUTE(emif_regdump);
 132
 133static int emif_mr4_show(struct seq_file *s, void *unused)
 134{
 135	struct emif_data *emif = s->private;
 136
 137	seq_printf(s, "MR4=%d\n", emif->temperature_level);
 138	return 0;
 139}
 140
 141DEFINE_SHOW_ATTRIBUTE(emif_mr4);
 142
 143static int __init_or_module emif_debugfs_init(struct emif_data *emif)
 144{
 145	emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
 146	debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
 147			    &emif_regdump_fops);
 148	debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
 149			    &emif_mr4_fops);
 150	return 0;
 151}
 152
 153static void __exit emif_debugfs_exit(struct emif_data *emif)
 154{
 155	debugfs_remove_recursive(emif->debugfs_root);
 156	emif->debugfs_root = NULL;
 157}
 158#else
 159static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
 160{
 161	return 0;
 162}
 163
 164static inline void __exit emif_debugfs_exit(struct emif_data *emif)
 165{
 166}
 167#endif
 168
 169/*
 170 * Get bus width used by EMIF. Note that this may be different from the
 171 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 172 * may be connected to a given CS of EMIF. In this case bus width as far
  173 * as EMIF is concerned is 32, whereas the DDR bus width is 16 bits.
 174 */
 175static u32 get_emif_bus_width(struct emif_data *emif)
 176{
 177	u32		width;
 178	void __iomem	*base = emif->base;
 179
 180	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
 181			>> NARROW_MODE_SHIFT;
 182	width = width == 0 ? 32 : 16;
 183
 184	return width;
 185}
 186
 187static void set_lpmode(struct emif_data *emif, u8 lpmode)
 188{
 189	u32 temp;
 190	void __iomem *base = emif->base;
 191
 192	/*
 193	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
 194	 * Efficient
 195	 *
 196	 * i743 DESCRIPTION:
 197	 * The EMIF supports power-down state for low power. The EMIF
 198	 * automatically puts the SDRAM into power-down after the memory is
 199	 * not accessed for a defined number of cycles and the
 200	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
 201	 * As the EMIF supports automatic output impedance calibration, a ZQ
 202	 * calibration long command is issued every time it exits active
 203	 * power-down and precharge power-down modes. The EMIF waits and
 204	 * blocks any other command during this calibration.
 205	 * The EMIF does not allow selective disabling of ZQ calibration upon
 206	 * exit of power-down mode. Due to very short periods of power-down
 207	 * cycles, ZQ calibration overhead creates bandwidth issues and
 208	 * increases overall system power consumption. On the other hand,
 209	 * issuing ZQ calibration long commands when exiting self-refresh is
 210	 * still required.
 211	 *
 212	 * WORKAROUND
 213	 * Because there is no power consumption benefit of the power-down due
 214	 * to the calibration and there is a performance risk, the guideline
  215	 * is to not allow the power-down state and, therefore, not to set
  216	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
 217	 */
 218	if ((emif->plat_data->ip_rev == EMIF_4D) &&
 219	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
 220		WARN_ONCE(1,
  221		  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743; switching to LP_MODE_SELF_REFRESH(2)\n");
 222		/* rollback LP_MODE to Self-refresh mode */
 223		lpmode = EMIF_LP_MODE_SELF_REFRESH;
 224	}
 225
 226	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
 227	temp &= ~LP_MODE_MASK;
 228	temp |= (lpmode << LP_MODE_SHIFT);
 229	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
 230}
 231
 232static void do_freq_update(void)
 233{
 234	struct emif_data *emif;
 235
 236	/*
 237	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
 238	 *
 239	 * i728 DESCRIPTION:
 240	 * The EMIF automatically puts the SDRAM into self-refresh mode
 241	 * after the EMIF has not performed accesses during
 242	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
 243	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
 244	 * to 0x2. If during a small window the following three events
 245	 * occur:
 246	 * - The SR_TIMING counter expires
 247	 * - And frequency change is requested
 248	 * - And OCP access is requested
  249	 * Then an unstable clock is produced on the DDR interface.
 250	 *
 251	 * WORKAROUND
 252	 * To avoid the occurrence of the three events, the workaround
 253	 * is to disable the self-refresh when requesting a frequency
 254	 * change. Before requesting a frequency change the software must
 255	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
 256	 * frequency change has been done, the software can reprogram
 257	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
 258	 */
 259	list_for_each_entry(emif, &device_list, node) {
 260		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
 261			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
 262	}
 263
 264	/*
 265	 * TODO: Do FREQ_UPDATE here when an API
 266	 * is available for this as part of the new
 267	 * clock framework
 268	 */
 269
 270	list_for_each_entry(emif, &device_list, node) {
 271		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
 272			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
 273	}
 274}
 275
 276/* Find addressing table entry based on the device's type and density */
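/*
 * (Editor's note) A sketch of the index math, assuming the density
 * encodings in jedec_ddr.h start at 1 for the smallest (64Mb) part:
 * 'density - 1' then indexes the common per-density entries directly,
 * while the LPDDR2-S2 specific 1Gb/2Gb geometries occupy extra slots at
 * the end of lpddr2_jedec_addressing_table, hence 'density + 3'.
 */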
 277static const struct lpddr2_addressing *get_addressing_table(
 278	const struct ddr_device_info *device_info)
 279{
 280	u32		index, type, density;
 281
 282	type = device_info->type;
 283	density = device_info->density;
 284
 285	switch (type) {
 286	case DDR_TYPE_LPDDR2_S4:
 287		index = density - 1;
 288		break;
 289	case DDR_TYPE_LPDDR2_S2:
 290		switch (density) {
 291		case DDR_DENSITY_1Gb:
 292		case DDR_DENSITY_2Gb:
 293			index = density + 3;
 294			break;
 295		default:
 296			index = density - 1;
 297		}
 298		break;
 299	default:
 300		return NULL;
 301	}
 302
 303	return &lpddr2_jedec_addressing_table[index];
 304}
 305
 306static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
 307		bool cs1_used, bool cal_resistors_per_cs)
 308{
 309	u32 zq = 0, val = 0;
 310
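	/*
	 * ZQCS interval: scale EMIF_ZQCS_INTERVAL_US to ns and divide by
	 * tREFI (ns), so the programmed value is the number of refresh
	 * periods between periodic ZQCS calibration commands.
	 */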
 311	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
 312	zq |= val << ZQ_REFINTERVAL_SHIFT;
 313
 314	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
 315	zq |= val << ZQ_ZQCL_MULT_SHIFT;
 316
 317	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
 318	zq |= val << ZQ_ZQINIT_MULT_SHIFT;
 319
 320	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;
 321
 322	if (cal_resistors_per_cs)
 323		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
 324	else
 325		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;
 326
 327	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */
 328
 329	val = cs1_used ? 1 : 0;
 330	zq |= val << ZQ_CS1EN_SHIFT;
 331
 332	return zq;
 333}
 334
 335static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
 336		const struct emif_custom_configs *custom_configs, bool cs1_used,
 337		u32 sdram_io_width, u32 emif_bus_width)
 338{
 339	u32 alert = 0, interval, devcnt;
 340
 341	if (custom_configs && (custom_configs->mask &
 342				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
 343		interval = custom_configs->temp_alert_poll_interval_ms;
 344	else
 345		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;
 346
 347	interval *= 1000000;			/* Convert to ns */
 348	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
 349	alert |= (interval << TA_REFINTERVAL_SHIFT);
 350
 351	/*
 352	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
 353	 * also to this form and subtract to get TA_DEVCNT, which is
 354	 * in log2(x) form.
 355	 */
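	/*
	 * Worked example: a 32-bit EMIF bus encodes as __fls(32) - 1 = 4;
	 * with x16 parts (encoded as 3), devcnt = 4 - 3 = 1 = log2(2),
	 * i.e. two devices side by side on the bus.
	 */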
 356	emif_bus_width = __fls(emif_bus_width) - 1;
 357	devcnt = emif_bus_width - sdram_io_width;
 358	alert |= devcnt << TA_DEVCNT_SHIFT;
 359
 360	/* DEVWDT is in 'log2(x) - 3' form */
 361	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;
 362
 363	alert |= 1 << TA_SFEXITEN_SHIFT;
 364	alert |= 1 << TA_CS0EN_SHIFT;
 365	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;
 366
 367	return alert;
 368}
 369
 370static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
 371{
 372	u32 pwr_mgmt_ctrl	= 0, timeout;
 373	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
 374	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
 375	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
 376	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
 377	u32 mask;
 378	u8 shift;
 379
 380	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;
 381
 382	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
 383		lpmode		= cust_cfgs->lpmode;
 384		timeout_perf	= cust_cfgs->lpmode_timeout_performance;
 385		timeout_pwr	= cust_cfgs->lpmode_timeout_power;
 386		freq_threshold  = cust_cfgs->lpmode_freq_threshold;
 387	}
 388
 389	/* Timeout based on DDR frequency */
 390	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;
 391
 392	/*
 393	 * The value to be set in register is "log2(timeout) - 3"
  394	 * If timeout < 16, load 0 in the register.
  395	 * If timeout is not a power of 2, round up to the next power of 2.
 396	 */
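	/*
	 * Worked example: timeout = 100 is not a power of 2, so it is
	 * doubled to 200 and __fls(200) - 3 = 7 - 3 = 4 is programmed,
	 * which the EMIF decodes as 2^(4+3) = 128 cycles, the next power
	 * of 2 above the requested value.
	 */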
 397	if (timeout < 16) {
 398		timeout = 0;
 399	} else {
 400		if (timeout & (timeout - 1))
 401			timeout <<= 1;
 402		timeout = __fls(timeout) - 3;
 403	}
 404
 405	switch (lpmode) {
 406	case EMIF_LP_MODE_CLOCK_STOP:
 407		shift = CS_TIM_SHIFT;
 408		mask = CS_TIM_MASK;
 409		break;
 410	case EMIF_LP_MODE_SELF_REFRESH:
 411		/* Workaround for errata i735 */
 412		if (timeout < 6)
 413			timeout = 6;
 414
 415		shift = SR_TIM_SHIFT;
 416		mask = SR_TIM_MASK;
 417		break;
 418	case EMIF_LP_MODE_PWR_DN:
 419		shift = PD_TIM_SHIFT;
 420		mask = PD_TIM_MASK;
 421		break;
 422	case EMIF_LP_MODE_DISABLE:
 423	default:
 424		mask = 0;
 425		shift = 0;
 426		break;
 427	}
 428	/* Round to maximum in case of overflow, BUT warn! */
 429	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
 430		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
 431		       lpmode,
 432		       timeout_perf,
 433		       timeout_pwr,
 434		       freq_threshold);
 435		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
 436		     timeout, mask >> shift);
 437		timeout = mask >> shift;
 438	}
 439
 440	/* Setup required timing */
 441	pwr_mgmt_ctrl = (timeout << shift) & mask;
  442	/* Set a default (maximum) timeout for the rest of the modes */
 443	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
 444			  ~mask;
 445
 446	/* No CS_TIM in EMIF_4D5 */
 447	if (ip_rev == EMIF_4D5)
 448		pwr_mgmt_ctrl &= ~CS_TIM_MASK;
 449
 450	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;
 451
 452	return pwr_mgmt_ctrl;
 453}
 454
 455/*
 456 * Get the temperature level of the EMIF instance:
 457 * Reads the MR4 register of attached SDRAM parts to find out the temperature
  458 * level. If there are two parts attached (one on each CS), then the temperature
 459 * level for the EMIF instance is the higher of the two temperatures.
 460 */
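/*
 * (Editor's note) In LPDDR2, MR4 reports the refresh-rate requirement,
 * which serves as a proxy for device temperature: higher MR4 values
 * indicate a hotter part that needs more frequent refresh.
 */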
 461static void get_temperature_level(struct emif_data *emif)
 462{
 463	u32		temp, temperature_level;
 464	void __iomem	*base;
 465
 466	base = emif->base;
 467
 468	/* Read mode register 4 */
 469	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
 470	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
 471	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
 472				MR4_SDRAM_REF_RATE_SHIFT;
 473
 474	if (emif->plat_data->device_info->cs1_used) {
 475		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
 476		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
 477		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
 478				>> MR4_SDRAM_REF_RATE_SHIFT;
 479		temperature_level = max(temp, temperature_level);
 480	}
 481
  482	/* treat everything less than nominal (3) in MR4 as nominal */
 483	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
 484		temperature_level = SDRAM_TEMP_NOMINAL;
 485
  486	/* if we get a reserved value in MR4, persist with the existing value */
 487	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
 488		emif->temperature_level = temperature_level;
 489}
 490
 491/*
 492 * setup_temperature_sensitive_regs() - set the timings for temperature
 493 * sensitive registers. This happens once at initialisation time based
 494 * on the temperature at boot time and subsequently based on the temperature
 495 * alert interrupt. Temperature alert can happen when the temperature
 496 * increases or drops. So this function can have the effect of either
 497 * derating the timings or going back to nominal values.
 498 */
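/*
 * (Editor's note) The *_SHDW registers written below are shadow
 * registers: the EMIF applies them to the active timing registers on
 * the next frequency-update event, which is why callers follow this
 * function with do_freq_update().
 */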
 499static void setup_temperature_sensitive_regs(struct emif_data *emif,
 500		struct emif_regs *regs)
 501{
 502	u32		tim1, tim3, ref_ctrl, type;
 503	void __iomem	*base = emif->base;
 504	u32		temperature;
 505
 506	type = emif->plat_data->device_info->type;
 507
 508	tim1 = regs->sdram_tim1_shdw;
 509	tim3 = regs->sdram_tim3_shdw;
 510	ref_ctrl = regs->ref_ctrl_shdw;
 511
 512	/* No de-rating for non-lpddr2 devices */
 513	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
 514		goto out;
 515
 516	temperature = emif->temperature_level;
 517	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
 518		ref_ctrl = regs->ref_ctrl_shdw_derated;
 519	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
 520		tim1 = regs->sdram_tim1_shdw_derated;
 521		tim3 = regs->sdram_tim3_shdw_derated;
 522		ref_ctrl = regs->ref_ctrl_shdw_derated;
 523	}
 524
 525out:
 526	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
 527	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
 528	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
 529}
 530
 531static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
 532{
 533	u32		old_temp_level;
 534	irqreturn_t	ret = IRQ_HANDLED;
 535	struct emif_custom_configs *custom_configs;
 536
 537	spin_lock_irqsave(&emif_lock, irq_state);
 538	old_temp_level = emif->temperature_level;
 539	get_temperature_level(emif);
 540
 541	if (unlikely(emif->temperature_level == old_temp_level)) {
 542		goto out;
 543	} else if (!emif->curr_regs) {
 544		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
 545		goto out;
 546	}
 547
 548	custom_configs = emif->plat_data->custom_configs;
 549
 550	/*
  551	 * If we detect a higher than "nominal" rating from the DDR sensor
  552	 * on a part without extended-temperature support, shut down the system
 553	 */
 554	if (custom_configs && !(custom_configs->mask &
 555				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
 556		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
 557			dev_err(emif->dev,
  558				"%s: not extended-temperature capable memory, treating MR4=0x%02x as a shutdown event\n",
 559				__func__, emif->temperature_level);
 560			/*
 561			 * Temperature far too high - do kernel_power_off()
 562			 * from thread context
 563			 */
 564			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
 565			ret = IRQ_WAKE_THREAD;
 566			goto out;
 567		}
 568	}
 569
 570	if (emif->temperature_level < old_temp_level ||
 571		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
 572		/*
 573		 * Temperature coming down - defer handling to thread OR
 574		 * Temperature far too high - do kernel_power_off() from
 575		 * thread context
 576		 */
 577		ret = IRQ_WAKE_THREAD;
 578	} else {
 579		/* Temperature is going up - handle immediately */
 580		setup_temperature_sensitive_regs(emif, emif->curr_regs);
 581		do_freq_update();
 582	}
 583
 584out:
 585	spin_unlock_irqrestore(&emif_lock, irq_state);
 586	return ret;
 587}
 588
 589static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
 590{
 591	u32			interrupts;
 592	struct emif_data	*emif = dev_id;
 593	void __iomem		*base = emif->base;
 594	struct device		*dev = emif->dev;
 595	irqreturn_t		ret = IRQ_HANDLED;
 596
 597	/* Save the status and clear it */
 598	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
 599	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
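	/*
	 * The status bits behave as write-1-to-clear, so writing back the
	 * value just read acknowledges exactly the interrupts captured
	 * above without clearing any that arrive later.
	 */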
 600
 601	/*
 602	 * Handle temperature alert
  603	 * Temperature alert should be the same for all ports
 604	 * So, it's enough to process it only for one of the ports
 605	 */
 606	if (interrupts & TA_SYS_MASK)
 607		ret = handle_temp_alert(base, emif);
 608
 609	if (interrupts & ERR_SYS_MASK)
 610		dev_err(dev, "Access error from SYS port - %x\n", interrupts);
 611
 612	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
 613		/* Save the status and clear it */
 614		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
 615		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);
 616
 617		if (interrupts & ERR_LL_MASK)
 618			dev_err(dev, "Access error from LL port - %x\n",
 619				interrupts);
 620	}
 621
 622	return ret;
 623}
 624
 625static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
 626{
 627	struct emif_data	*emif = dev_id;
 628
 629	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
  630		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit. Shutting down!\n");
 631
 632		/* If we have Power OFF ability, use it, else try restarting */
 633		if (kernel_can_power_off()) {
 634			kernel_power_off();
 635		} else {
 636			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
 637			kernel_restart("SDRAM Over-temp Emergency restart");
 638		}
 639		return IRQ_HANDLED;
 640	}
 641
 642	spin_lock_irqsave(&emif_lock, irq_state);
 643
 644	if (emif->curr_regs) {
 645		setup_temperature_sensitive_regs(emif, emif->curr_regs);
 646		do_freq_update();
 647	} else {
 648		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
 649	}
 650
 651	spin_unlock_irqrestore(&emif_lock, irq_state);
 652
 653	return IRQ_HANDLED;
 654}
 655
 656static void clear_all_interrupts(struct emif_data *emif)
 657{
 658	void __iomem	*base = emif->base;
 659
 660	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
 661		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
 662	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
 663		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
 664			base + EMIF_LL_OCP_INTERRUPT_STATUS);
 665}
 666
 667static void disable_and_clear_all_interrupts(struct emif_data *emif)
 668{
 669	void __iomem		*base = emif->base;
 670
 671	/* Disable all interrupts */
 672	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
 673		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
 674	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
 675		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
 676			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);
 677
 678	/* Clear all interrupts */
 679	clear_all_interrupts(emif);
 680}
 681
 682static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
 683{
 684	u32		interrupts, type;
 685	void __iomem	*base = emif->base;
 686
 687	type = emif->plat_data->device_info->type;
 688
 689	clear_all_interrupts(emif);
 690
 691	/* Enable interrupts for SYS interface */
 692	interrupts = EN_ERR_SYS_MASK;
 693	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
 694		interrupts |= EN_TA_SYS_MASK;
 695	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);
 696
 697	/* Enable interrupts for LL interface */
 698	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
 699		/* TA need not be enabled for LL */
 700		interrupts = EN_ERR_LL_MASK;
 701		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
 702	}
 703
 704	/* setup IRQ handlers */
 705	return devm_request_threaded_irq(emif->dev, irq,
 706				    emif_interrupt_handler,
 707				    emif_threaded_isr,
 708				    0, dev_name(emif->dev),
 709				    emif);
 710
 711}
 712
 713static void __init_or_module emif_onetime_settings(struct emif_data *emif)
 714{
 715	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
 716	void __iomem			*base = emif->base;
 717	const struct lpddr2_addressing	*addressing;
 718	const struct ddr_device_info	*device_info;
 719
 720	device_info = emif->plat_data->device_info;
 721	addressing = get_addressing_table(device_info);
 722
 723	/*
 724	 * Init power management settings
 725	 * We don't know the frequency yet. Use a high frequency
 726	 * value for a conservative timeout setting
 727	 */
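	/*
	 * Passing 1 GHz guarantees freq >= freq_threshold inside
	 * get_pwr_mgmt_ctrl(), so the performance timeout is chosen here.
	 */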
 728	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
 729			emif->plat_data->ip_rev);
 730	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
 731	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);
 732
 733	/* Init ZQ calibration settings */
 734	zq = get_zq_config_reg(addressing, device_info->cs1_used,
 735		device_info->cal_resistors_per_cs);
 736	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);
 737
  738	/* Check temperature level */
 739	get_temperature_level(emif);
 740	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
  741		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit. Needs shutdown!\n");
 742
 743	/* Init temperature polling */
 744	temp_alert_cfg = get_temp_alert_config(addressing,
 745		emif->plat_data->custom_configs, device_info->cs1_used,
 746		device_info->io_width, get_emif_bus_width(emif));
 747	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);
 748
 749	/*
 750	 * Program external PHY control registers that are not frequency
 751	 * dependent
 752	 */
 753	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
 754		return;
 755	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
 756	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
 757	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
 758	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
 759	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
 760	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
 761	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
 762	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
 763	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
 764	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
 765	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
 766	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
 767	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
 768	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
 769	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
 770	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
 771	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
 772	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
 773	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
 774	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
 775	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
 776}
 777
 778static void get_default_timings(struct emif_data *emif)
 779{
 780	struct emif_platform_data *pd = emif->plat_data;
 781
 782	pd->timings		= lpddr2_jedec_timings;
 783	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);
 784
 785	dev_warn(emif->dev, "%s: using default timings\n", __func__);
 786}
 787
 788static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
 789		u32 ip_rev, struct device *dev)
 790{
 791	int valid;
 792
 793	valid = (type == DDR_TYPE_LPDDR2_S4 ||
 794			type == DDR_TYPE_LPDDR2_S2)
 795		&& (density >= DDR_DENSITY_64Mb
 796			&& density <= DDR_DENSITY_8Gb)
 797		&& (io_width >= DDR_IO_WIDTH_8
 798			&& io_width <= DDR_IO_WIDTH_32);
 799
 800	/* Combinations of EMIF and PHY revisions that we support today */
 801	switch (ip_rev) {
 802	case EMIF_4D:
 803		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
 804		break;
 805	case EMIF_4D5:
 806		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
 807		break;
 808	default:
 809		valid = 0;
 810	}
 811
 812	if (!valid)
 813		dev_err(dev, "%s: invalid DDR details\n", __func__);
 814	return valid;
 815}
 816
 817static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
 818		struct device *dev)
 819{
 820	int valid = 1;
 821
 822	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
 823		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
 824		valid = cust_cfgs->lpmode_freq_threshold &&
 825			cust_cfgs->lpmode_timeout_performance &&
 826			cust_cfgs->lpmode_timeout_power;
 827
 828	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
 829		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;
 830
 831	if (!valid)
 832		dev_warn(dev, "%s: invalid custom configs\n", __func__);
 833
 834	return valid;
 835}
 836
 837#if defined(CONFIG_OF)
 838static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
 839		struct emif_data *emif)
 840{
 841	struct emif_custom_configs	*cust_cfgs = NULL;
 842	int				len;
 843	const __be32			*lpmode, *poll_intvl;
 844
 845	lpmode = of_get_property(np_emif, "low-power-mode", &len);
 846	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);
 847
 848	if (lpmode || poll_intvl)
 849		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
 850			GFP_KERNEL);
 851
 852	if (!cust_cfgs)
 853		return;
 854
 855	if (lpmode) {
 856		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
 857		cust_cfgs->lpmode = be32_to_cpup(lpmode);
 858		of_property_read_u32(np_emif,
 859				"low-power-mode-timeout-performance",
 860				&cust_cfgs->lpmode_timeout_performance);
 861		of_property_read_u32(np_emif,
 862				"low-power-mode-timeout-power",
 863				&cust_cfgs->lpmode_timeout_power);
 864		of_property_read_u32(np_emif,
 865				"low-power-mode-freq-threshold",
 866				&cust_cfgs->lpmode_freq_threshold);
 867	}
 868
 869	if (poll_intvl) {
 870		cust_cfgs->mask |=
 871				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
 872		cust_cfgs->temp_alert_poll_interval_ms =
 873						be32_to_cpup(poll_intvl);
 874	}
 875
 876	if (of_find_property(np_emif, "extended-temp-part", &len))
 877		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;
 878
 879	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
 880		devm_kfree(emif->dev, cust_cfgs);
 881		return;
 882	}
 883
 884	emif->plat_data->custom_configs = cust_cfgs;
 885}
 886
 887static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
 888		struct device_node *np_ddr,
 889		struct ddr_device_info *dev_info)
 890{
 891	u32 density = 0, io_width = 0;
 892	int len;
 893
 894	if (of_find_property(np_emif, "cs1-used", &len))
 895		dev_info->cs1_used = true;
 896
 897	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
 898		dev_info->cal_resistors_per_cs = true;
 899
 900	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
 901		dev_info->type = DDR_TYPE_LPDDR2_S4;
 902	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
 903		dev_info->type = DDR_TYPE_LPDDR2_S2;
 904
 905	of_property_read_u32(np_ddr, "density", &density);
 906	of_property_read_u32(np_ddr, "io-width", &io_width);
 907
  908	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
 909	if (density & (density - 1))
 910		dev_info->density = 0;
 911	else
 912		dev_info->density = __fls(density) - 5;
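	/*
	 * Worked example (assuming the jedec_ddr.h encoding starts at 1
	 * for 64Mb): a 1Gb part is described as density = 1024 (Mb), and
	 * __fls(1024) - 5 = 5 selects the 1Gb encoding.
	 */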
 913
  914	/* Convert from io_width in bits to io_width encoding in jedec_ddr.h */
 915	if (io_width & (io_width - 1))
 916		dev_info->io_width = 0;
 917	else
 918		dev_info->io_width = __fls(io_width) - 1;
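	/* e.g. io-width = 16 encodes as __fls(16) - 1 = 3 */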
 919}
 920
 921static struct emif_data * __init_or_module of_get_memory_device_details(
 922		struct device_node *np_emif, struct device *dev)
 923{
 924	struct emif_data		*emif = NULL;
 925	struct ddr_device_info		*dev_info = NULL;
 926	struct emif_platform_data	*pd = NULL;
 927	struct device_node		*np_ddr;
 928	int				len;
 929
 930	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
 931	if (!np_ddr)
 932		goto error;
 933	emif	= devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
 934	pd	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
 935	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
 936
 937	if (!emif || !pd || !dev_info) {
  938		dev_err(dev, "%s: out of memory\n",
 939			__func__);
 940		goto error;
 941	}
 942
 943	emif->plat_data		= pd;
 944	pd->device_info		= dev_info;
 945	emif->dev		= dev;
 946	emif->np_ddr		= np_ddr;
 947	emif->temperature_level	= SDRAM_TEMP_NOMINAL;
 948
 949	if (of_device_is_compatible(np_emif, "ti,emif-4d"))
 950		emif->plat_data->ip_rev = EMIF_4D;
 951	else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
 952		emif->plat_data->ip_rev = EMIF_4D5;
 953
 954	of_property_read_u32(np_emif, "phy-type", &pd->phy_type);
 955
 956	if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
 957		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;
 958
 959	of_get_ddr_info(np_emif, np_ddr, dev_info);
 960	if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
 961			pd->device_info->io_width, pd->phy_type, pd->ip_rev,
 962			emif->dev)) {
 963		dev_err(dev, "%s: invalid device data!!\n", __func__);
 964		goto error;
 965	}
 966	/*
 967	 * For EMIF instances other than EMIF1 see if the devices connected
  968	 * are exactly the same as on EMIF1 (which is typically the case). If so,
 969	 * mark it as a duplicate of EMIF1. This will save some memory and
 970	 * computation.
 971	 */
 972	if (emif1 && emif1->np_ddr == np_ddr) {
 973		emif->duplicate = true;
 974		goto out;
 975	} else if (emif1) {
 976		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
 977			__func__);
 978	}
 979
 980	of_get_custom_configs(np_emif, emif);
 981	emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
 982					emif->plat_data->device_info->type,
 983					&emif->plat_data->timings_arr_size);
 984
 985	emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
 986	goto out;
 987
 988error:
 989	return NULL;
 990out:
 991	return emif;
 992}
 993
 994#else
 995
 996static struct emif_data * __init_or_module of_get_memory_device_details(
 997		struct device_node *np_emif, struct device *dev)
 998{
 999	return NULL;
1000}
1001#endif
1002
1003static struct emif_data *__init_or_module get_device_details(
1004		struct platform_device *pdev)
1005{
1006	u32				size;
1007	struct emif_data		*emif = NULL;
1008	struct ddr_device_info		*dev_info;
1009	struct emif_custom_configs	*cust_cfgs;
1010	struct emif_platform_data	*pd;
1011	struct device			*dev;
1012	void				*temp;
1013
1014	pd = pdev->dev.platform_data;
1015	dev = &pdev->dev;
1016
1017	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
1018			pd->device_info->density, pd->device_info->io_width,
1019			pd->phy_type, pd->ip_rev, dev))) {
1020		dev_err(dev, "%s: invalid device data\n", __func__);
1021		goto error;
1022	}
1023
1024	emif	= devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
1025	temp	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1026	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
1027
1028	if (!emif || !temp || !dev_info)
1029		goto error;
1030
1031	memcpy(temp, pd, sizeof(*pd));
1032	pd = temp;
1033	memcpy(dev_info, pd->device_info, sizeof(*dev_info));
1034
1035	pd->device_info		= dev_info;
1036	emif->plat_data		= pd;
1037	emif->dev		= dev;
1038	emif->temperature_level	= SDRAM_TEMP_NOMINAL;
1039
1040	/*
1041	 * For EMIF instances other than EMIF1 see if the devices connected
 1042	 * are exactly the same as on EMIF1 (which is typically the case). If so,
1043	 * mark it as a duplicate of EMIF1 and skip copying timings data.
1044	 * This will save some memory and some computation later.
1045	 */
1046	emif->duplicate = emif1 && (memcmp(dev_info,
1047		emif1->plat_data->device_info,
1048		sizeof(struct ddr_device_info)) == 0);
1049
1050	if (emif->duplicate) {
1051		pd->timings = NULL;
1052		pd->min_tck = NULL;
1053		goto out;
1054	} else if (emif1) {
1055		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
1056			__func__);
1057	}
1058
1059	/*
1060	 * Copy custom configs - ignore allocation error, if any, as
1061	 * custom_configs is not very critical
1062	 */
1063	cust_cfgs = pd->custom_configs;
1064	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
1065		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
1066		if (temp)
1067			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
1068		pd->custom_configs = temp;
1069	}
1070
1071	/*
1072	 * Copy timings and min-tck values from platform data. If it is not
1073	 * available or if memory allocation fails, use JEDEC defaults
1074	 */
1075	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
1076	if (pd->timings) {
1077		temp = devm_kzalloc(dev, size, GFP_KERNEL);
1078		if (temp) {
1079			memcpy(temp, pd->timings, size);
1080			pd->timings = temp;
1081		} else {
1082			get_default_timings(emif);
1083		}
1084	} else {
1085		get_default_timings(emif);
1086	}
1087
1088	if (pd->min_tck) {
1089		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
1090		if (temp) {
1091			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
1092			pd->min_tck = temp;
1093		} else {
1094			pd->min_tck = &lpddr2_jedec_min_tck;
1095		}
1096	} else {
1097		pd->min_tck = &lpddr2_jedec_min_tck;
1098	}
1099
1100out:
1101	return emif;
1102
1103error:
1104	return NULL;
1105}
1106
1107static int __init_or_module emif_probe(struct platform_device *pdev)
1108{
1109	struct emif_data	*emif;
1110	int			irq, ret;
1111
1112	if (pdev->dev.of_node)
1113		emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
1114	else
1115		emif = get_device_details(pdev);
1116
1117	if (!emif) {
1118		pr_err("%s: error getting device data\n", __func__);
1119		goto error;
1120	}
1121
1122	list_add(&emif->node, &device_list);
1123
1124	/* Save pointers to each other in emif and device structures */
1125	emif->dev = &pdev->dev;
1126	platform_set_drvdata(pdev, emif);
1127
1128	emif->base = devm_platform_ioremap_resource(pdev, 0);
1129	if (IS_ERR(emif->base))
1130		goto error;
1131
1132	irq = platform_get_irq(pdev, 0);
1133	if (irq < 0)
1134		goto error;
1135
1136	emif_onetime_settings(emif);
1137	emif_debugfs_init(emif);
1138	disable_and_clear_all_interrupts(emif);
1139	ret = setup_interrupts(emif, irq);
1140	if (ret)
1141		goto error;
1142
1143	/* One-time actions taken on probing the first device */
1144	if (!emif1) {
1145		emif1 = emif;
1146
1147		/*
1148		 * TODO: register notifiers for frequency and voltage
1149		 * change here once the respective frameworks are
1150		 * available
1151		 */
1152	}
1153
1154	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
1155		__func__, emif->base, irq);
1156
1157	return 0;
1158error:
1159	return -ENODEV;
1160}
1161
1162static void __exit emif_remove(struct platform_device *pdev)
1163{
1164	struct emif_data *emif = platform_get_drvdata(pdev);
1165
1166	emif_debugfs_exit(emif);
1167}
1168
1169static void emif_shutdown(struct platform_device *pdev)
1170{
1171	struct emif_data	*emif = platform_get_drvdata(pdev);
1172
1173	disable_and_clear_all_interrupts(emif);
1174}
1175
1176#if defined(CONFIG_OF)
1177static const struct of_device_id emif_of_match[] = {
1178		{ .compatible = "ti,emif-4d" },
1179		{ .compatible = "ti,emif-4d5" },
1180		{},
1181};
1182MODULE_DEVICE_TABLE(of, emif_of_match);
1183#endif
1184
1185static struct platform_driver emif_driver = {
1186	.remove_new	= __exit_p(emif_remove),
1187	.shutdown	= emif_shutdown,
1188	.driver = {
1189		.name = "emif",
1190		.of_match_table = of_match_ptr(emif_of_match),
1191	},
1192};
1193
1194module_platform_driver_probe(emif_driver, emif_probe);
1195
1196MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
1197MODULE_LICENSE("GPL");
1198MODULE_ALIAS("platform:emif");
1199MODULE_AUTHOR("Texas Instruments Inc");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * EMIF driver
   4 *
   5 * Copyright (C) 2012 Texas Instruments, Inc.
   6 *
   7 * Aneesh V <aneesh@ti.com>
   8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 */
  10#include <linux/err.h>
  11#include <linux/kernel.h>
  12#include <linux/reboot.h>
  13#include <linux/platform_data/emif_plat.h>
  14#include <linux/io.h>
  15#include <linux/device.h>
  16#include <linux/platform_device.h>
  17#include <linux/interrupt.h>
  18#include <linux/slab.h>
  19#include <linux/of.h>
  20#include <linux/debugfs.h>
  21#include <linux/seq_file.h>
  22#include <linux/module.h>
  23#include <linux/list.h>
  24#include <linux/spinlock.h>
  25#include <linux/pm.h>
  26
  27#include "emif.h"
  28#include "jedec_ddr.h"
  29#include "of_memory.h"
  30
  31/**
  32 * struct emif_data - Per device static data for driver's use
  33 * @duplicate:			Whether the DDR devices attached to this EMIF
  34 *				instance are exactly same as that on EMIF1. In
  35 *				this case we can save some memory and processing
  36 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
  37 *				to this EMIF - read from MR4 register. If there
  38 *				are two devices attached to this EMIF, this
  39 *				value is the maximum of the two temperature
  40 *				levels.
  41 * @node:			node in the device list
  42 * @base:			base address of memory-mapped IO registers.
  43 * @dev:			device pointer.
  44 * @addressing			table with addressing information from the spec
  45 * @regs_cache:			An array of 'struct emif_regs' that stores
  46 *				calculated register values for different
  47 *				frequencies, to avoid re-calculating them on
  48 *				each DVFS transition.
  49 * @curr_regs:			The set of register values used in the last
  50 *				frequency change (i.e. corresponding to the
  51 *				frequency in effect at the moment)
  52 * @plat_data:			Pointer to saved platform data.
  53 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
  54 * @np_ddr:			Pointer to ddr device tree node
  55 */
  56struct emif_data {
  57	u8				duplicate;
  58	u8				temperature_level;
  59	u8				lpmode;
  60	struct list_head		node;
  61	unsigned long			irq_state;
  62	void __iomem			*base;
  63	struct device			*dev;
  64	const struct lpddr2_addressing	*addressing;
  65	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
  66	struct emif_regs		*curr_regs;
  67	struct emif_platform_data	*plat_data;
  68	struct dentry			*debugfs_root;
  69	struct device_node		*np_ddr;
  70};
  71
  72static struct emif_data *emif1;
  73static spinlock_t	emif_lock;
  74static unsigned long	irq_state;
  75static u32		t_ck; /* DDR clock period in ps */
  76static LIST_HEAD(device_list);
  77
  78#ifdef CONFIG_DEBUG_FS
  79static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
  80	struct emif_regs *regs)
  81{
  82	u32 type = emif->plat_data->device_info->type;
  83	u32 ip_rev = emif->plat_data->ip_rev;
  84
  85	seq_printf(s, "EMIF register cache dump for %dMHz\n",
  86		regs->freq/1000000);
  87
  88	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
  89	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
  90	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
  91	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);
  92
  93	if (ip_rev == EMIF_4D) {
  94		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
  95			regs->read_idle_ctrl_shdw_normal);
  96		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
  97			regs->read_idle_ctrl_shdw_volt_ramp);
  98	} else if (ip_rev == EMIF_4D5) {
  99		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
 100			regs->dll_calib_ctrl_shdw_normal);
 101		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
 102			regs->dll_calib_ctrl_shdw_volt_ramp);
 103	}
 104
 105	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
 106		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
 107			regs->ref_ctrl_shdw_derated);
 108		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
 109			regs->sdram_tim1_shdw_derated);
 110		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
 111			regs->sdram_tim3_shdw_derated);
 112	}
 113}
 114
 115static int emif_regdump_show(struct seq_file *s, void *unused)
 116{
 117	struct emif_data	*emif	= s->private;
 118	struct emif_regs	**regs_cache;
 119	int			i;
 120
 121	if (emif->duplicate)
 122		regs_cache = emif1->regs_cache;
 123	else
 124		regs_cache = emif->regs_cache;
 125
 126	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
 127		do_emif_regdump_show(s, emif, regs_cache[i]);
 128		seq_putc(s, '\n');
 129	}
 130
 131	return 0;
 132}
 133
 134static int emif_regdump_open(struct inode *inode, struct file *file)
 135{
 136	return single_open(file, emif_regdump_show, inode->i_private);
 137}
 138
 139static const struct file_operations emif_regdump_fops = {
 140	.open			= emif_regdump_open,
 141	.read			= seq_read,
 142	.release		= single_release,
 143};
 144
 145static int emif_mr4_show(struct seq_file *s, void *unused)
 146{
 147	struct emif_data *emif = s->private;
 148
 149	seq_printf(s, "MR4=%d\n", emif->temperature_level);
 150	return 0;
 151}
 152
 153static int emif_mr4_open(struct inode *inode, struct file *file)
 154{
 155	return single_open(file, emif_mr4_show, inode->i_private);
 156}
 157
 158static const struct file_operations emif_mr4_fops = {
 159	.open			= emif_mr4_open,
 160	.read			= seq_read,
 161	.release		= single_release,
 162};
 163
 164static int __init_or_module emif_debugfs_init(struct emif_data *emif)
 165{
 166	struct dentry	*dentry;
 167	int		ret;
 168
 169	dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
 170	if (!dentry) {
 171		ret = -ENOMEM;
 172		goto err0;
 173	}
 174	emif->debugfs_root = dentry;
 175
 176	dentry = debugfs_create_file("regcache_dump", S_IRUGO,
 177			emif->debugfs_root, emif, &emif_regdump_fops);
 178	if (!dentry) {
 179		ret = -ENOMEM;
 180		goto err1;
 181	}
 182
 183	dentry = debugfs_create_file("mr4", S_IRUGO,
 184			emif->debugfs_root, emif, &emif_mr4_fops);
 185	if (!dentry) {
 186		ret = -ENOMEM;
 187		goto err1;
 188	}
 189
 190	return 0;
 191err1:
 192	debugfs_remove_recursive(emif->debugfs_root);
 193err0:
 194	return ret;
 195}
 196
 197static void __exit emif_debugfs_exit(struct emif_data *emif)
 198{
 199	debugfs_remove_recursive(emif->debugfs_root);
 200	emif->debugfs_root = NULL;
 201}
 202#else
 203static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
 204{
 205	return 0;
 206}
 207
 208static inline void __exit emif_debugfs_exit(struct emif_data *emif)
 209{
 210}
 211#endif
 212
 213/*
 214 * Calculate the period of DDR clock from frequency value
 215 */
 216static void set_ddr_clk_period(u32 freq)
 217{
 218	/* Divide 10^12 by frequency to get period in ps */
 219	t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
 220}
 221
 222/*
 223 * Get bus width used by EMIF. Note that this may be different from the
 224 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 225 * may be connected to a given CS of EMIF. In this case bus width as far
 226 * as EMIF is concerned is 32, where as the DDR bus width is 16 bits.
 227 */
 228static u32 get_emif_bus_width(struct emif_data *emif)
 229{
 230	u32		width;
 231	void __iomem	*base = emif->base;
 232
 233	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
 234			>> NARROW_MODE_SHIFT;
 235	width = width == 0 ? 32 : 16;
 236
 237	return width;
 238}
 239
 240/*
 241 * Get the CL from SDRAM_CONFIG register
 242 */
 243static u32 get_cl(struct emif_data *emif)
 244{
 245	u32		cl;
 246	void __iomem	*base = emif->base;
 247
 248	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;
 249
 250	return cl;
 251}
 252
 253static void set_lpmode(struct emif_data *emif, u8 lpmode)
 254{
 255	u32 temp;
 256	void __iomem *base = emif->base;
 257
 258	/*
 259	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
 260	 * Efficient
 261	 *
 262	 * i743 DESCRIPTION:
 263	 * The EMIF supports power-down state for low power. The EMIF
 264	 * automatically puts the SDRAM into power-down after the memory is
 265	 * not accessed for a defined number of cycles and the
 266	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
 267	 * As the EMIF supports automatic output impedance calibration, a ZQ
 268	 * calibration long command is issued every time it exits active
 269	 * power-down and precharge power-down modes. The EMIF waits and
 270	 * blocks any other command during this calibration.
 271	 * The EMIF does not allow selective disabling of ZQ calibration upon
 272	 * exit of power-down mode. Due to very short periods of power-down
 273	 * cycles, ZQ calibration overhead creates bandwidth issues and
 274	 * increases overall system power consumption. On the other hand,
 275	 * issuing ZQ calibration long commands when exiting self-refresh is
 276	 * still required.
 277	 *
 278	 * WORKAROUND
 279	 * Because there is no power consumption benefit of the power-down due
 280	 * to the calibration and there is a performance risk, the guideline
 281	 * is to not allow power-down state and, therefore, to not have set
 282	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
 283	 */
 284	if ((emif->plat_data->ip_rev == EMIF_4D) &&
 285	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
 286		WARN_ONCE(1,
 287			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
 288		/* rollback LP_MODE to Self-refresh mode */
 289		lpmode = EMIF_LP_MODE_SELF_REFRESH;
 290	}
 291
 292	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
 293	temp &= ~LP_MODE_MASK;
 294	temp |= (lpmode << LP_MODE_SHIFT);
 295	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
 296}
 297
 298static void do_freq_update(void)
 299{
 300	struct emif_data *emif;
 301
 302	/*
 303	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
 304	 *
 305	 * i728 DESCRIPTION:
 306	 * The EMIF automatically puts the SDRAM into self-refresh mode
 307	 * after the EMIF has not performed accesses during
 308	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
 309	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
 310	 * to 0x2. If during a small window the following three events
 311	 * occur:
 312	 * - The SR_TIMING counter expires
 313	 * - And frequency change is requested
 314	 * - And OCP access is requested
 315	 * Then it causes instable clock on the DDR interface.
 316	 *
 317	 * WORKAROUND
 318	 * To avoid the occurrence of the three events, the workaround
 319	 * is to disable the self-refresh when requesting a frequency
 320	 * change. Before requesting a frequency change the software must
 321	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
 322	 * frequency change has been done, the software can reprogram
 323	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
 324	 */
 325	list_for_each_entry(emif, &device_list, node) {
 326		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
 327			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
 328	}
 329
 330	/*
 331	 * TODO: Do FREQ_UPDATE here when an API
 332	 * is available for this as part of the new
 333	 * clock framework
 334	 */
 335
 336	list_for_each_entry(emif, &device_list, node) {
 337		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
 338			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
 339	}
 340}
 341
 342/* Find addressing table entry based on the device's type and density */
 343static const struct lpddr2_addressing *get_addressing_table(
 344	const struct ddr_device_info *device_info)
 345{
 346	u32		index, type, density;
 347
 348	type = device_info->type;
 349	density = device_info->density;
 350
 351	switch (type) {
 352	case DDR_TYPE_LPDDR2_S4:
 353		index = density - 1;
 354		break;
 355	case DDR_TYPE_LPDDR2_S2:
 356		switch (density) {
 357		case DDR_DENSITY_1Gb:
 358		case DDR_DENSITY_2Gb:
 359			index = density + 3;
 360			break;
 361		default:
 362			index = density - 1;
 363		}
 364		break;
 365	default:
 366		return NULL;
 367	}
 368
 369	return &lpddr2_jedec_addressing_table[index];
 370}
 371
 372/*
 373 * Find the the right timing table from the array of timing
 374 * tables of the device using DDR clock frequency
 375 */
 376static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
 377		u32 freq)
 378{
 379	u32				i, min, max, freq_nearest;
 380	const struct lpddr2_timings	*timings = NULL;
 381	const struct lpddr2_timings	*timings_arr = emif->plat_data->timings;
 382	struct				device *dev = emif->dev;
 383
 384	/* Start with a very high frequency - 1GHz */
 385	freq_nearest = 1000000000;
 386
 387	/*
 388	 * Find the timings table such that:
 389	 *  1. the frequency range covers the required frequency(safe) AND
 390	 *  2. the max_freq is closest to the required frequency(optimal)
 391	 */
 392	for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
 393		max = timings_arr[i].max_freq;
 394		min = timings_arr[i].min_freq;
 395		if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
 396			freq_nearest = max;
 397			timings = &timings_arr[i];
 398		}
 399	}
 400
 401	if (!timings)
 402		dev_err(dev, "%s: couldn't find timings for - %dHz\n",
 403			__func__, freq);
 404
 405	dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
 406		__func__, freq, freq_nearest);
 407
 408	return timings;
 409}
 410
 411static u32 get_sdram_ref_ctrl_shdw(u32 freq,
 412		const struct lpddr2_addressing *addressing)
 413{
 414	u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;
 415
 416	/* Scale down frequency and t_refi to avoid overflow */
 417	freq_khz = freq / 1000;
 418	t_refi = addressing->tREFI_ns / 100;
 419
 420	/*
 421	 * refresh rate to be set is 'tREFI(in us) * freq in MHz
 422	 * division by 10000 to account for change in units
 423	 */
 424	val = t_refi * freq_khz / 10000;
 425	ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;
 426
 427	return ref_ctrl_shdw;
 428}
 429
 430static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
 431		const struct lpddr2_min_tck *min_tck,
 432		const struct lpddr2_addressing *addressing)
 433{
 434	u32 tim1 = 0, val = 0;
 435
 436	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
 437	tim1 |= val << T_WTR_SHIFT;
 438
 439	if (addressing->num_banks == B8)
 440		val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
 441	else
 442		val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
 443	tim1 |= (val - 1) << T_RRD_SHIFT;
 444
 445	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
 446	tim1 |= val << T_RC_SHIFT;
 447
 448	val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
 449	tim1 |= (val - 1) << T_RAS_SHIFT;
 450
 451	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
 452	tim1 |= val << T_WR_SHIFT;
 453
 454	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
 455	tim1 |= val << T_RCD_SHIFT;
 456
 457	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
 458	tim1 |= val << T_RP_SHIFT;
 459
 460	return tim1;
 461}
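
/*
 * Each field above follows the same pattern (editor's note): convert
 * the timing from time units to clock cycles with DIV_ROUND_UP(),
 * enforce the JEDEC minimum number of cycles from min_tck, and program
 * the result minus one (the fields are "value - 1" encoded). E.g. with
 * a hypothetical tWR = 15000 ps at t_ck = 2500 ps (400 MHz):
 *
 *   DIV_ROUND_UP(15000, 2500) = 6 cycles -> field value 6 - 1 = 5
 */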
 462
 463static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
 464		const struct lpddr2_min_tck *min_tck,
 465		const struct lpddr2_addressing *addressing)
 466{
 467	u32 tim1 = 0, val = 0;
 468
 469	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
 470	tim1 = val << T_WTR_SHIFT;
 471
 472	/*
 473	 * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
 474	 * to tFAW for de-rating
 475	 */
 476	if (addressing->num_banks == B8) {
 477		val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
 478	} else {
 479		val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
 480		val = max(min_tck->tRRD, val) - 1;
 481	}
 482	tim1 |= val << T_RRD_SHIFT;
 483
 484	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
 485	tim1 |= (val - 1) << T_RC_SHIFT;
 486
 487	val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
 488	val = max(min_tck->tRASmin, val) - 1;
 489	tim1 |= val << T_RAS_SHIFT;
 490
 491	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
 492	tim1 |= val << T_WR_SHIFT;
 493
 494	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
 495	tim1 |= (val - 1) << T_RCD_SHIFT;
 496
 497	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
 498	tim1 |= (val - 1) << T_RP_SHIFT;
 499
 500	return tim1;
 501}
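
/*
 * Editor's note: the 1875 ps adder above corresponds to the JEDEC
 * LPDDR2 de-rating rule applied at high temperature, where the core
 * timings tRCD, tRAS, tRP and tRRD (and hence tRC and tFAW) are
 * relaxed by approximately 1.875 ns relative to data-sheet values.
 */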
 502
 503static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
 504		const struct lpddr2_min_tck *min_tck,
 505		const struct lpddr2_addressing *addressing,
 506		u32 type)
 507{
 508	u32 tim2 = 0, val = 0;
 509
 510	val = min_tck->tCKE - 1;
 511	tim2 |= val << T_CKE_SHIFT;
 512
 513	val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
 514	tim2 |= val << T_RTP_SHIFT;
 515
 516	/* tXSNR = tRFCab + 10 ns for LPDDR2 */
 517	val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
 518	tim2 |= val << T_XSNR_SHIFT;
 519
 520	/* XSRD same as XSNR for LPDDR2 */
 521	tim2 |= val << T_XSRD_SHIFT;
 522
 523	val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
 524	tim2 |= val << T_XP_SHIFT;
 525
 526	return tim2;
 527}
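
/*
 * tXSNR example (editor's illustration): for a hypothetical
 * tRFCab_ps = 130000 (130 ns) at t_ck = 2500 ps:
 *
 *   DIV_ROUND_UP(130000 + 10000, 2500) = 56 cycles -> field = 55
 */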
 528
 529static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
 530		const struct lpddr2_min_tck *min_tck,
 531		const struct lpddr2_addressing *addressing,
 532		u32 type, u32 ip_rev, u32 derated)
 533{
 534	u32 tim3 = 0, val = 0, t_dqsck;
 535
 536	val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
 537	val = val > 0xF ? 0xF : val;
 538	tim3 |= val << T_RAS_MAX_SHIFT;
 539
 540	val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
 541	tim3 |= val << T_RFC_SHIFT;
 542
 543	t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
 544		timings->tDQSCK_max_derated : timings->tDQSCK_max;
 545	if (ip_rev == EMIF_4D5)
 546		val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
 547	else
 548		val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;
 549
 550	tim3 |= val << T_TDQSCKMAX_SHIFT;
 551
 552	val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
 553	tim3 |= val << ZQ_ZQCS_SHIFT;
 554
 555	val = DIV_ROUND_UP(timings->tCKESR, t_ck);
 556	val = max(min_tck->tCKESR, val) - 1;
 557	tim3 |= val << T_CKESR_SHIFT;
 558
 559	if (ip_rev == EMIF_4D5) {
 560		tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;
 561
 562		val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
 563		tim3 |= val << T_PDLL_UL_SHIFT;
 564	}
 565
 566	return tim3;
 567}
 568
 569static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
 570		bool cs1_used, bool cal_resistors_per_cs)
 571{
 572	u32 zq = 0, val = 0;
 573
 574	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
 575	zq |= val << ZQ_REFINTERVAL_SHIFT;
 576
 577	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
 578	zq |= val << ZQ_ZQCL_MULT_SHIFT;
 579
 580	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
 581	zq |= val << ZQ_ZQINIT_MULT_SHIFT;
 582
 583	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;
 584
 585	if (cal_resistors_per_cs)
 586		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
 587	else
 588		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;
 589
 590	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */
 591
 592	val = cs1_used ? 1 : 0;
 593	zq |= val << ZQ_CS1EN_SHIFT;
 594
 595	return zq;
 596}
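
/*
 * ZQCS interval example (editor's illustration with hypothetical
 * numbers): if EMIF_ZQCS_INTERVAL_US were 50000 and tREFI_ns = 3900,
 * the interval field would be 50000 * 1000 / 3900 = 12820 refresh
 * periods between ZQ calibration-short commands.
 */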
 597
 598static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
 599		const struct emif_custom_configs *custom_configs, bool cs1_used,
 600		u32 sdram_io_width, u32 emif_bus_width)
 601{
 602	u32 alert = 0, interval, devcnt;
 603
 604	if (custom_configs && (custom_configs->mask &
 605				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
 606		interval = custom_configs->temp_alert_poll_interval_ms;
 607	else
 608		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;
 609
 610	interval *= 1000000;			/* Convert to ns */
 611	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
 612	alert |= (interval << TA_REFINTERVAL_SHIFT);
 613
 614	/*
 615	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
 616	 * also to this form and subtract to get TA_DEVCNT, which is
 617	 * in log2(x) form.
 618	 */
 619	emif_bus_width = __fls(emif_bus_width) - 1;
 620	devcnt = emif_bus_width - sdram_io_width;
 621	alert |= devcnt << TA_DEVCNT_SHIFT;
 622
 623	/* DEVWDT is in 'log2(x) - 3' form */
 624	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;
 625
 626	alert |= 1 << TA_SFEXITEN_SHIFT;
 627	alert |= 1 << TA_CS0EN_SHIFT;
 628	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;
 629
 630	return alert;
 631}
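
/*
 * DEVCNT/DEVWDT example (editor's illustration): a 32-bit EMIF bus
 * built from two x16 parts per chip-select. __fls(32) - 1 = 4
 * ('log2(x) - 1' form), and x16 encodes as sdram_io_width = 3, so
 * devcnt = 4 - 3 = 1 = log2(2 devices), and the DEVWDT field is
 * 3 - 2 = 1 = log2(16) - 3.
 */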
 632
 633static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
 634{
 635	u32 idle = 0, val = 0;
 636
 637	/*
 638	 * Use the maximum interval under normal conditions, and a
 639	 * shorter one (more frequent read idle) while voltage is ramping
 640	 */
 641	if (volt_ramp)
 642		val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
 643	else
 644		val = 0x1FF;
 645
 646	/*
 647	 * The READ_IDLE_CTRL register in EMIF4D has the same offset and
 648	 * fields as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
 649	 */
 650	idle |= val << DLL_CALIB_INTERVAL_SHIFT;
 651	idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;
 652
 653	return idle;
 654}
 655
 656static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
 657{
 658	u32 calib = 0, val = 0;
 659
 660	if (volt_ramp == DDR_VOLTAGE_RAMPING)
 661		val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
 662	else
 663		val = 0; /* Disabled when voltage is stable */
 664
 665	calib |= val << DLL_CALIB_INTERVAL_SHIFT;
 666	calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;
 667
 668	return calib;
 669}
 670
 671static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
 672	u32 freq, u8 RL)
 673{
 674	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;
 675
 676	val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
 677	phy |= val << READ_LATENCY_SHIFT_4D;
 678
 679	if (freq <= 100000000)
 680		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
 681	else if (freq <= 200000000)
 682		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
 683	else
 684		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;
 685
 686	phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;
 687
 688	return phy;
 689}
 690
 691static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
 692{
 693	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;
 694
 695	/*
 696	 * The DLL operates at 266 MHz. If the DDR frequency is near
 697	 * 266 MHz, half-delay is not needed; otherwise set half-delay
 698	 */
 699	if (freq >= 265000000 && freq < 267000000)
 700		half_delay = 0;
 701	else
 702		half_delay = 1;
 703
 704	phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
 705	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
 706			t_ck) - 1) << READ_LATENCY_SHIFT_4D5);
 707
 708	return phy;
 709}
 710
 711static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
 712{
 713	u32 fifo_we_slave_ratio;
 714
 715	fifo_we_slave_ratio =  DIV_ROUND_CLOSEST(
 716		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
 717
 718	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
 719		fifo_we_slave_ratio << 22;
 720}
 721
 722static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
 723{
 724	u32 fifo_we_slave_ratio;
 725
 726	fifo_we_slave_ratio =  DIV_ROUND_CLOSEST(
 727		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
 728
 729	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
 730		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
 731}
 732
 733static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
 734{
 735	u32 fifo_we_slave_ratio;
 736
 737	fifo_we_slave_ratio =  DIV_ROUND_CLOSEST(
 738		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
 739
 740	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
 741		fifo_we_slave_ratio << 13;
 742}
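
/*
 * Editor's note on the shifts above: the same 11-bit
 * fifo_we_slave_ratio value is replicated once per DQS group, packed
 * back-to-back every 11 bits across EXT_PHY_CTRL 2, 3 and 4. Copies
 * that straddle a 32-bit register boundary are split between the high
 * bits of one register (ratio << 22, ratio << 23) and the low bits of
 * the next (ratio >> 10, ratio >> 9).
 */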
 743
 744static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
 745{
 746	u32 pwr_mgmt_ctrl	= 0, timeout;
 747	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
 748	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
 749	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
 750	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
 751	u32 mask;
 752	u8 shift;
 753
 754	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;
 755
 756	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
 757		lpmode		= cust_cfgs->lpmode;
 758		timeout_perf	= cust_cfgs->lpmode_timeout_performance;
 759		timeout_pwr	= cust_cfgs->lpmode_timeout_power;
 760		freq_threshold  = cust_cfgs->lpmode_freq_threshold;
 761	}
 762
 763	/* Timeout based on DDR frequency */
 764	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;
 765
 766	/*
 767	 * The value to be set in the register is "log2(timeout) - 3":
 768	 * if timeout < 16, load 0 in the register;
 769	 * if timeout is not a power of 2, round up to the next power of 2
 770	 */
 771	if (timeout < 16) {
 772		timeout = 0;
 773	} else {
 774		if (timeout & (timeout - 1))
 775			timeout <<= 1;
 776		timeout = __fls(timeout) - 3;
 777	}
 778
 779	switch (lpmode) {
 780	case EMIF_LP_MODE_CLOCK_STOP:
 781		shift = CS_TIM_SHIFT;
 782		mask = CS_TIM_MASK;
 783		break;
 784	case EMIF_LP_MODE_SELF_REFRESH:
 785		/* Workaround for errata i735 */
 786		if (timeout < 6)
 787			timeout = 6;
 788
 789		shift = SR_TIM_SHIFT;
 790		mask = SR_TIM_MASK;
 791		break;
 792	case EMIF_LP_MODE_PWR_DN:
 793		shift = PD_TIM_SHIFT;
 794		mask = PD_TIM_MASK;
 795		break;
 796	case EMIF_LP_MODE_DISABLE:
 797	default:
 798		mask = 0;
 799		shift = 0;
 800		break;
 801	}
 802	/* Round to maximum in case of overflow, BUT warn! */
 803	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
 804		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
 805		       lpmode,
 806		       timeout_perf,
 807		       timeout_pwr,
 808		       freq_threshold);
 809		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
 810		     timeout, mask >> shift);
 811		timeout = mask >> shift;
 812	}
 813
 814	/* Setup required timing */
 815	pwr_mgmt_ctrl = (timeout << shift) & mask;
 816	/* setup a default mask for rest of the modes */
 817	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
 818			  ~mask;
 819
 820	/* No CS_TIM in EMIF_4D5 */
 821	if (ip_rev == EMIF_4D5)
 822		pwr_mgmt_ctrl &= ~CS_TIM_MASK;
 823
 824	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;
 825
 826	return pwr_mgmt_ctrl;
 827}
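
/*
 * Timeout encoding example (editor's illustration): a requested
 * timeout of 192 cycles is not a power of 2, so it is shifted left
 * once (384) before taking __fls(384) - 3 = 8 - 3 = 5; the hardware
 * then waits 2^(5 + 3) = 256 cycles, i.e. the timeout is rounded up
 * to the next power of 2. A timeout below 16 is simply programmed
 * as 0.
 */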
 828
 829/*
 830 * Get the temperature level of the EMIF instance:
 831 * Reads the MR4 register of the attached SDRAM parts to find out the
 832 * temperature level. If there are two parts attached (one on each CS),
 833 * the temperature level for the EMIF instance is the higher of the two.
 834 */
 835static void get_temperature_level(struct emif_data *emif)
 836{
 837	u32		temp, temperature_level;
 838	void __iomem	*base;
 839
 840	base = emif->base;
 841
 842	/* Read mode register 4 */
 843	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
 844	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
 845	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
 846				MR4_SDRAM_REF_RATE_SHIFT;
 847
 848	if (emif->plat_data->device_info->cs1_used) {
 849		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
 850		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
 851		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
 852				>> MR4_SDRAM_REF_RATE_SHIFT;
 853		temperature_level = max(temp, temperature_level);
 854	}
 855
 856	/* treat everything less than nominal(3) in MR4 as nominal */
 857	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
 858		temperature_level = SDRAM_TEMP_NOMINAL;
 859
 860	/* if we get reserved value in MR4 persist with the existing value */
 861	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
 862		emif->temperature_level = temperature_level;
 863}
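
/*
 * Editor's note (assuming the usual SDRAM_TEMP_* encoding of the
 * JEDEC MR4 refresh-rate field): 0x3 is the nominal level, 0x4 is
 * reserved, 0x5 requests de-rated refresh, 0x6 requests de-rated
 * refresh and timings, and 0x7 means the part is beyond its
 * operating limit and the system should shut down.
 */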
 864
 865/*
 866 * Program EMIF shadow registers that are not dependent on temperature
 867 * or voltage
 868 */
 869static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
 870{
 871	void __iomem	*base = emif->base;
 872
 873	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
 874	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
 875	writel(regs->pwr_mgmt_ctrl_shdw,
 876	       base + EMIF_POWER_MANAGEMENT_CTRL_SHDW);
 877
 878	/* Settings specific for EMIF4D5 */
 879	if (emif->plat_data->ip_rev != EMIF_4D5)
 880		return;
 881	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
 882	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
 883	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
 884}
 885
 886/*
 887 * When voltage ramps dll calibration and forced read idle should
 888 * happen more often
 889 */
 890static void setup_volt_sensitive_regs(struct emif_data *emif,
 891		struct emif_regs *regs, u32 volt_state)
 892{
 893	u32		calib_ctrl;
 894	void __iomem	*base = emif->base;
 895
 896	/*
 897	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
 898	 * EMIF_DLL_CALIB_CTRL in EMIF4D5, and dll_calib_ctrl_shdw_* is
 899	 * an alias of the respective read_idle_ctrl_shdw_* (members of
 900	 * a union). So, the code below takes care of both cases
 901	 */
 902	if (volt_state == DDR_VOLTAGE_RAMPING)
 903		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
 904	else
 905		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;
 906
 907	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
 908}
 909
 910/*
 911 * setup_temperature_sensitive_regs() - set the timings for temperature
 912 * sensitive registers. This happens once at initialisation time based
 913 * on the temperature at boot time and subsequently based on the temperature
 914 * alert interrupt. Temperature alert can happen when the temperature
 915 * increases or drops. So this function can have the effect of either
 916 * derating the timings or going back to nominal values.
 917 */
 918static void setup_temperature_sensitive_regs(struct emif_data *emif,
 919		struct emif_regs *regs)
 920{
 921	u32		tim1, tim3, ref_ctrl, type;
 922	void __iomem	*base = emif->base;
 923	u32		temperature;
 924
 925	type = emif->plat_data->device_info->type;
 926
 927	tim1 = regs->sdram_tim1_shdw;
 928	tim3 = regs->sdram_tim3_shdw;
 929	ref_ctrl = regs->ref_ctrl_shdw;
 930
 931	/* No de-rating for non-lpddr2 devices */
 932	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
 933		goto out;
 934
 935	temperature = emif->temperature_level;
 936	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
 937		ref_ctrl = regs->ref_ctrl_shdw_derated;
 938	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
 939		tim1 = regs->sdram_tim1_shdw_derated;
 940		tim3 = regs->sdram_tim3_shdw_derated;
 941		ref_ctrl = regs->ref_ctrl_shdw_derated;
 942	}
 943
 944out:
 945	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
 946	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
 947	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
 948}
 949
 950static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
 951{
 952	u32		old_temp_level;
 953	irqreturn_t	ret = IRQ_HANDLED;
 954	struct emif_custom_configs *custom_configs;
 955
 956	spin_lock_irqsave(&emif_lock, irq_state);
 957	old_temp_level = emif->temperature_level;
 958	get_temperature_level(emif);
 959
 960	if (unlikely(emif->temperature_level == old_temp_level)) {
 961		goto out;
 962	} else if (!emif->curr_regs) {
 963		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
 964		goto out;
 965	}
 966
 967	custom_configs = emif->plat_data->custom_configs;
 968
 969	/*
 970	 * If we detect a higher-than-nominal reading from the DDR sensor
 971	 * on a part without extended temperature support, shut down the system
 972	 */
 973	if (custom_configs && !(custom_configs->mask &
 974				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
 975		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
 976			dev_err(emif->dev,
 977				"%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
 978				__func__, emif->temperature_level);
 979			/*
 980			 * Temperature far too high - do kernel_power_off()
 981			 * from thread context
 982			 */
 983			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
 984			ret = IRQ_WAKE_THREAD;
 985			goto out;
 986		}
 987	}
 988
 989	if (emif->temperature_level < old_temp_level ||
 990		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
 991		/*
 992		 * Temperature coming down - defer handling to thread OR
 993		 * Temperature far too high - do kernel_power_off() from
 994		 * thread context
 995		 */
 996		ret = IRQ_WAKE_THREAD;
 997	} else {
 998		/* Temperature is going up - handle immediately */
 999		setup_temperature_sensitive_regs(emif, emif->curr_regs);
1000		do_freq_update();
1001	}
1002
1003out:
1004	spin_unlock_irqrestore(&emif_lock, irq_state);
1005	return ret;
1006}
1007
1008static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
1009{
1010	u32			interrupts;
1011	struct emif_data	*emif = dev_id;
1012	void __iomem		*base = emif->base;
1013	struct device		*dev = emif->dev;
1014	irqreturn_t		ret = IRQ_HANDLED;
1015
1016	/* Save the status and clear it */
1017	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
1018	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
1019
1020	/*
1021	 * Handle temperature alert
1022	 * The temperature alert should be the same for all ports,
1023	 * so it's enough to process it for only one of the ports
1024	 */
1025	if (interrupts & TA_SYS_MASK)
1026		ret = handle_temp_alert(base, emif);
1027
1028	if (interrupts & ERR_SYS_MASK)
1029		dev_err(dev, "Access error from SYS port - %x\n", interrupts);
1030
1031	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
1032		/* Save the status and clear it */
1033		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
1034		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);
1035
1036		if (interrupts & ERR_LL_MASK)
1037			dev_err(dev, "Access error from LL port - %x\n",
1038				interrupts);
1039	}
1040
1041	return ret;
1042}
1043
1044static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
1045{
1046	struct emif_data	*emif = dev_id;
1047
1048	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
1049		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
1050
1051		/* If we have Power OFF ability, use it, else try restarting */
1052		if (pm_power_off) {
1053			kernel_power_off();
1054		} else {
1055			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
1056			kernel_restart("SDRAM Over-temp Emergency restart");
1057		}
1058		return IRQ_HANDLED;
1059	}
1060
1061	spin_lock_irqsave(&emif_lock, irq_state);
1062
1063	if (emif->curr_regs) {
1064		setup_temperature_sensitive_regs(emif, emif->curr_regs);
1065		do_freq_update();
1066	} else {
1067		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
1068	}
1069
1070	spin_unlock_irqrestore(&emif_lock, irq_state);
1071
1072	return IRQ_HANDLED;
1073}
1074
1075static void clear_all_interrupts(struct emif_data *emif)
1076{
1077	void __iomem	*base = emif->base;
1078
1079	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
1080		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
1081	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
1082		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
1083			base + EMIF_LL_OCP_INTERRUPT_STATUS);
1084}
1085
1086static void disable_and_clear_all_interrupts(struct emif_data *emif)
1087{
1088	void __iomem		*base = emif->base;
1089
1090	/* Disable all interrupts */
1091	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
1092		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
1093	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
1094		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
1095			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);
1096
1097	/* Clear all interrupts */
1098	clear_all_interrupts(emif);
1099}
1100
1101static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
1102{
1103	u32		interrupts, type;
1104	void __iomem	*base = emif->base;
1105
1106	type = emif->plat_data->device_info->type;
1107
1108	clear_all_interrupts(emif);
1109
1110	/* Enable interrupts for SYS interface */
1111	interrupts = EN_ERR_SYS_MASK;
1112	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
1113		interrupts |= EN_TA_SYS_MASK;
1114	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);
1115
1116	/* Enable interrupts for LL interface */
1117	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
1118		/* TA need not be enabled for LL */
1119		interrupts = EN_ERR_LL_MASK;
1120		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
1121	}
1122
1123	/* setup IRQ handlers */
1124	return devm_request_threaded_irq(emif->dev, irq,
1125				    emif_interrupt_handler,
1126				    emif_threaded_isr,
1127				    0, dev_name(emif->dev),
1128				    emif);
1130}
1131
1132static void __init_or_module emif_onetime_settings(struct emif_data *emif)
1133{
1134	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
1135	void __iomem			*base = emif->base;
1136	const struct lpddr2_addressing	*addressing;
1137	const struct ddr_device_info	*device_info;
1138
1139	device_info = emif->plat_data->device_info;
1140	addressing = get_addressing_table(device_info);
1141
1142	/*
1143	 * Init power management settings
1144	 * We don't know the frequency yet. Use a high frequency
1145	 * value for a conservative timeout setting
1146	 */
1147	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
1148			emif->plat_data->ip_rev);
1149	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
1150	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);
1151
1152	/* Init ZQ calibration settings */
1153	zq = get_zq_config_reg(addressing, device_info->cs1_used,
1154		device_info->cal_resistors_per_cs);
1155	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);
1156
1157	/* Check temperature level */
1158	get_temperature_level(emif);
1159	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
1160		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
1161
1162	/* Init temperature polling */
1163	temp_alert_cfg = get_temp_alert_config(addressing,
1164		emif->plat_data->custom_configs, device_info->cs1_used,
1165		device_info->io_width, get_emif_bus_width(emif));
1166	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);
1167
1168	/*
1169	 * Program external PHY control registers that are not frequency
1170	 * dependent
1171	 */
1172	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
1173		return;
1174	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
1175	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
1176	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
1177	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
1178	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
1179	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
1180	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
1181	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
1182	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
1183	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
1184	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
1185	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
1186	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
1187	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
1188	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
1189	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
1190	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
1191	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
1192	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
1193	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
1194	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
1195}
1196
1197static void get_default_timings(struct emif_data *emif)
1198{
1199	struct emif_platform_data *pd = emif->plat_data;
1200
1201	pd->timings		= lpddr2_jedec_timings;
1202	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);
1203
1204	dev_warn(emif->dev, "%s: using default timings\n", __func__);
1205}
1206
1207static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
1208		u32 ip_rev, struct device *dev)
1209{
1210	int valid;
1211
1212	valid = (type == DDR_TYPE_LPDDR2_S4 ||
1213			type == DDR_TYPE_LPDDR2_S2)
1214		&& (density >= DDR_DENSITY_64Mb
1215			&& density <= DDR_DENSITY_8Gb)
1216		&& (io_width >= DDR_IO_WIDTH_8
1217			&& io_width <= DDR_IO_WIDTH_32);
1218
1219	/* Combinations of EMIF and PHY revisions that we support today */
1220	switch (ip_rev) {
1221	case EMIF_4D:
1222		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
1223		break;
1224	case EMIF_4D5:
1225		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
1226		break;
1227	default:
1228		valid = 0;
1229	}
1230
1231	if (!valid)
1232		dev_err(dev, "%s: invalid DDR details\n", __func__);
1233	return valid;
1234}
1235
1236static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
1237		struct device *dev)
1238{
1239	int valid = 1;
1240
1241	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
1242		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
1243		valid = cust_cfgs->lpmode_freq_threshold &&
1244			cust_cfgs->lpmode_timeout_performance &&
1245			cust_cfgs->lpmode_timeout_power;
1246
1247	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
1248		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;
1249
1250	if (!valid)
1251		dev_warn(dev, "%s: invalid custom configs\n", __func__);
1252
1253	return valid;
1254}
1255
1256#if defined(CONFIG_OF)
1257static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
1258		struct emif_data *emif)
1259{
1260	struct emif_custom_configs	*cust_cfgs = NULL;
1261	int				len;
1262	const __be32			*lpmode, *poll_intvl;
1263
1264	lpmode = of_get_property(np_emif, "low-power-mode", &len);
1265	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);
1266
1267	if (lpmode || poll_intvl)
1268		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
1269			GFP_KERNEL);
1270
1271	if (!cust_cfgs)
1272		return;
1273
1274	if (lpmode) {
1275		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
1276		cust_cfgs->lpmode = be32_to_cpup(lpmode);
1277		of_property_read_u32(np_emif,
1278				"low-power-mode-timeout-performance",
1279				&cust_cfgs->lpmode_timeout_performance);
1280		of_property_read_u32(np_emif,
1281				"low-power-mode-timeout-power",
1282				&cust_cfgs->lpmode_timeout_power);
1283		of_property_read_u32(np_emif,
1284				"low-power-mode-freq-threshold",
1285				&cust_cfgs->lpmode_freq_threshold);
1286	}
1287
1288	if (poll_intvl) {
1289		cust_cfgs->mask |=
1290				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
1291		cust_cfgs->temp_alert_poll_interval_ms =
1292						be32_to_cpup(poll_intvl);
1293	}
1294
1295	if (of_find_property(np_emif, "extended-temp-part", &len))
1296		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;
1297
1298	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
1299		devm_kfree(emif->dev, cust_cfgs);
1300		return;
1301	}
1302
1303	emif->plat_data->custom_configs = cust_cfgs;
1304}
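
/*
 * Illustrative device-tree fragment for the properties parsed above
 * (editor's sketch; node name and values are hypothetical):
 *
 *	emif@4c000000 {
 *		compatible = "ti,emif-4d5";
 *		low-power-mode = <2>;
 *		low-power-mode-timeout-performance = <262144>;
 *		low-power-mode-timeout-power = <512>;
 *		low-power-mode-freq-threshold = <400000000>;
 *		temp-alert-poll-interval = <360>;
 *		extended-temp-part;
 *	};
 */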
1305
1306static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
1307		struct device_node *np_ddr,
1308		struct ddr_device_info *dev_info)
1309{
1310	u32 density = 0, io_width = 0;
1311	int len;
1312
1313	if (of_find_property(np_emif, "cs1-used", &len))
1314		dev_info->cs1_used = true;
1315
1316	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
1317		dev_info->cal_resistors_per_cs = true;
1318
1319	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
1320		dev_info->type = DDR_TYPE_LPDDR2_S4;
1321	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
1322		dev_info->type = DDR_TYPE_LPDDR2_S2;
1323
1324	of_property_read_u32(np_ddr, "density", &density);
1325	of_property_read_u32(np_ddr, "io-width", &io_width);
1326
1327	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
1328	if (density & (density - 1))
1329		dev_info->density = 0;
1330	else
1331		dev_info->density = __fls(density) - 5;
1332
1333	/* Convert from io_width in bits to the io_width encoding in jedec_ddr.h */
1334	if (io_width & (io_width - 1))
1335		dev_info->io_width = 0;
1336	else
1337		dev_info->io_width = __fls(io_width) - 1;
1338}
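
/*
 * Conversion example (editor's illustration): density = <2048> (Mb)
 * gives __fls(2048) - 5 = 11 - 5 = 6, which lines up with
 * DDR_DENSITY_2Gb assuming the encoding starts at 1 for 64Mb;
 * io-width = <32> gives __fls(32) - 1 = 4. Non-power-of-2 values
 * are rejected by storing 0, which later fails is_dev_data_valid().
 */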
1339
1340static struct emif_data * __init_or_module of_get_memory_device_details(
1341		struct device_node *np_emif, struct device *dev)
1342{
1343	struct emif_data		*emif = NULL;
1344	struct ddr_device_info		*dev_info = NULL;
1345	struct emif_platform_data	*pd = NULL;
1346	struct device_node		*np_ddr;
1347	int				len;
1348
1349	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
1350	if (!np_ddr)
1351		goto error;
1352	emif	= devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
1353	pd	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1354	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
1355
1356	if (!emif || !pd || !dev_info) {
1357		dev_err(dev, "%s: Out of memory!!\n",
1358			__func__);
1359		goto error;
1360	}
1361
1362	emif->plat_data		= pd;
1363	pd->device_info		= dev_info;
1364	emif->dev		= dev;
1365	emif->np_ddr		= np_ddr;
1366	emif->temperature_level	= SDRAM_TEMP_NOMINAL;
1367
1368	if (of_device_is_compatible(np_emif, "ti,emif-4d"))
1369		emif->plat_data->ip_rev = EMIF_4D;
1370	else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
1371		emif->plat_data->ip_rev = EMIF_4D5;
1372
1373	of_property_read_u32(np_emif, "phy-type", &pd->phy_type);
1374
1375	if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
1376		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;
1377
1378	of_get_ddr_info(np_emif, np_ddr, dev_info);
1379	if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
1380			pd->device_info->io_width, pd->phy_type, pd->ip_rev,
1381			emif->dev)) {
1382		dev_err(dev, "%s: invalid device data!!\n", __func__);
1383		goto error;
1384	}
1385	/*
1386	 * For EMIF instances other than EMIF1, see if the devices connected
1387	 * are exactly the same as on EMIF1 (which is typically the case).
1388	 * If so, mark it as a duplicate of EMIF1. This will save some memory
1389	 * and computation.
1390	 */
1391	if (emif1 && emif1->np_ddr == np_ddr) {
1392		emif->duplicate = true;
1393		goto out;
1394	} else if (emif1) {
1395		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
1396			__func__);
1397	}
1398
1399	of_get_custom_configs(np_emif, emif);
1400	emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
1401					emif->plat_data->device_info->type,
1402					&emif->plat_data->timings_arr_size);
1403
1404	emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
1405	goto out;
1406
1407error:
1408	return NULL;
1409out:
1410	return emif;
1411}
1412
1413#else
1414
1415static struct emif_data * __init_or_module of_get_memory_device_details(
1416		struct device_node *np_emif, struct device *dev)
1417{
1418	return NULL;
1419}
1420#endif
1421
1422static struct emif_data *__init_or_module get_device_details(
1423		struct platform_device *pdev)
1424{
1425	u32				size;
1426	struct emif_data		*emif = NULL;
1427	struct ddr_device_info		*dev_info;
1428	struct emif_custom_configs	*cust_cfgs;
1429	struct emif_platform_data	*pd;
1430	struct device			*dev;
1431	void				*temp;
1432
1433	pd = pdev->dev.platform_data;
1434	dev = &pdev->dev;
1435
1436	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
1437			pd->device_info->density, pd->device_info->io_width,
1438			pd->phy_type, pd->ip_rev, dev))) {
1439		dev_err(dev, "%s: invalid device data\n", __func__);
1440		goto error;
1441	}
1442
1443	emif	= devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
1444	temp	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1445	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
1446
1447	if (!emif || !temp || !dev_info) {
1448		dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
1449		goto error;
1450	}
1451
1452	memcpy(temp, pd, sizeof(*pd));
1453	pd = temp;
1454	memcpy(dev_info, pd->device_info, sizeof(*dev_info));
1455
1456	pd->device_info		= dev_info;
1457	emif->plat_data		= pd;
1458	emif->dev		= dev;
1459	emif->temperature_level	= SDRAM_TEMP_NOMINAL;
1460
1461	/*
1462	 * For EMIF instances other than EMIF1, see if the devices connected
1463	 * are exactly the same as on EMIF1 (which is typically the case).
1464	 * If so, mark it as a duplicate of EMIF1 and skip copying timings
1465	 * data. This will save some memory and some computation later.
1466	 */
1467	emif->duplicate = emif1 && (memcmp(dev_info,
1468		emif1->plat_data->device_info,
1469		sizeof(struct ddr_device_info)) == 0);
1470
1471	if (emif->duplicate) {
1472		pd->timings = NULL;
1473		pd->min_tck = NULL;
1474		goto out;
1475	} else if (emif1) {
1476		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
1477			__func__);
1478	}
1479
1480	/*
1481	 * Copy custom configs - ignore allocation error, if any, as
1482	 * custom_configs is not very critical
1483	 */
1484	cust_cfgs = pd->custom_configs;
1485	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
1486		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
1487		if (temp)
1488			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
1489		else
1490			dev_warn(dev, "%s:%d: allocation error\n", __func__,
1491				__LINE__);
1492		pd->custom_configs = temp;
1493	}
1494
1495	/*
1496	 * Copy timings and min-tck values from platform data. If it is not
1497	 * available or if memory allocation fails, use JEDEC defaults
1498	 */
1499	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
1500	if (pd->timings) {
1501		temp = devm_kzalloc(dev, size, GFP_KERNEL);
1502		if (temp) {
1503			memcpy(temp, pd->timings, size);
1504			pd->timings = temp;
1505		} else {
1506			dev_warn(dev, "%s:%d: allocation error\n", __func__,
1507				__LINE__);
1508			get_default_timings(emif);
1509		}
1510	} else {
1511		get_default_timings(emif);
1512	}
1513
1514	if (pd->min_tck) {
1515		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
1516		if (temp) {
1517			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
1518			pd->min_tck = temp;
1519		} else {
1520			dev_warn(dev, "%s:%d: allocation error\n", __func__,
1521				__LINE__);
1522			pd->min_tck = &lpddr2_jedec_min_tck;
1523		}
1524	} else {
1525		pd->min_tck = &lpddr2_jedec_min_tck;
1526	}
1527
1528out:
1529	return emif;
1530
1531error:
1532	return NULL;
1533}
1534
1535static int __init_or_module emif_probe(struct platform_device *pdev)
1536{
1537	struct emif_data	*emif;
1538	struct resource		*res;
1539	int			irq;
1540
1541	if (pdev->dev.of_node)
1542		emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
1543	else
1544		emif = get_device_details(pdev);
1545
1546	if (!emif) {
1547		pr_err("%s: error getting device data\n", __func__);
1548		goto error;
1549	}
1550
1551	list_add(&emif->node, &device_list);
1552	emif->addressing = get_addressing_table(emif->plat_data->device_info);
1553
1554	/* Save pointers to each other in emif and device structures */
1555	emif->dev = &pdev->dev;
1556	platform_set_drvdata(pdev, emif);
1557
1558	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1559	emif->base = devm_ioremap_resource(emif->dev, res);
1560	if (IS_ERR(emif->base))
1561		goto error;
1562
1563	irq = platform_get_irq(pdev, 0);
1564	if (irq < 0)
1565		goto error;
1566
1567	emif_onetime_settings(emif);
1568	emif_debugfs_init(emif);
1569	disable_and_clear_all_interrupts(emif);
1570	setup_interrupts(emif, irq);
1571
1572	/* One-time actions taken on probing the first device */
1573	if (!emif1) {
1574		emif1 = emif;
1575		spin_lock_init(&emif_lock);
1576
1577		/*
1578		 * TODO: register notifiers for frequency and voltage
1579		 * change here once the respective frameworks are
1580		 * available
1581		 */
1582	}
1583
1584	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
1585		__func__, emif->base, irq);
1586
1587	return 0;
1588error:
1589	return -ENODEV;
1590}
1591
1592static int __exit emif_remove(struct platform_device *pdev)
1593{
1594	struct emif_data *emif = platform_get_drvdata(pdev);
1595
1596	emif_debugfs_exit(emif);
1597
1598	return 0;
1599}
1600
1601static void emif_shutdown(struct platform_device *pdev)
1602{
1603	struct emif_data	*emif = platform_get_drvdata(pdev);
1604
1605	disable_and_clear_all_interrupts(emif);
1606}
1607
1608static int get_emif_reg_values(struct emif_data *emif, u32 freq,
1609		struct emif_regs *regs)
1610{
1611	u32				ip_rev, phy_type;
1612	u32				cl, type;
1613	const struct lpddr2_timings	*timings;
1614	const struct lpddr2_min_tck	*min_tck;
1615	const struct ddr_device_info	*device_info;
1616	const struct lpddr2_addressing	*addressing;
1617	struct emif_data		*emif_for_calc;
1618	struct device			*dev;
1619
1620	dev = emif->dev;
1621	/*
1622	 * If the devices on this EMIF instance are a duplicate of EMIF1's,
1623	 * use the EMIF1 details for the calculation
1624	 */
1625	emif_for_calc	= emif->duplicate ? emif1 : emif;
1626	timings		= get_timings_table(emif_for_calc, freq);
1627	addressing	= emif_for_calc->addressing;
1628	if (!timings || !addressing) {
1629		dev_err(dev, "%s: not enough data available for %uHz\n",
1630			__func__, freq);
1631		return -1;
1632	}
1633
1634	device_info	= emif_for_calc->plat_data->device_info;
1635	type		= device_info->type;
1636	ip_rev		= emif_for_calc->plat_data->ip_rev;
1637	phy_type	= emif_for_calc->plat_data->phy_type;
1638
1639	min_tck		= emif_for_calc->plat_data->min_tck;
1640
1641	set_ddr_clk_period(freq);
1642
1643	regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing);
1644	regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck,
1645			addressing);
1646	regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck,
1647			addressing, type);
1648	regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck,
1649		addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);
1650
1651	cl = get_cl(emif);
1652
1653	if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
1654		regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
1655			timings, freq, cl);
1656	} else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
1657		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
1658		regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
1659		regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
1660		regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
1661	} else {
1662		return -1;
1663	}
1664
1665	/* Only timeout values in pwr_mgmt_ctrl_shdw register */
1666	regs->pwr_mgmt_ctrl_shdw =
1667		get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
1668		(CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);
1669
1670	if (ip_rev & EMIF_4D) {
1671		regs->read_idle_ctrl_shdw_normal =
1672			get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);
1673
1674		regs->read_idle_ctrl_shdw_volt_ramp =
1675			get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
1676	} else if (ip_rev & EMIF_4D5) {
1677		regs->dll_calib_ctrl_shdw_normal =
1678			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);
1679
1680		regs->dll_calib_ctrl_shdw_volt_ramp =
1681			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
1682	}
1683
1684	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
1685		regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
1686			addressing);
1687
1688		regs->sdram_tim1_shdw_derated =
1689			get_sdram_tim_1_shdw_derated(timings, min_tck,
1690				addressing);
1691
1692		regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
1693			min_tck, addressing, type, ip_rev,
1694			EMIF_DERATED_TIMINGS);
1695	}
1696
1697	regs->freq = freq;
1698
1699	return 0;
1700}
1701
1702/*
1703 * get_regs() - gets the cached emif_regs structure for a given EMIF instance
1704 * and a given frequency (freq):
1705 *
1706 * As an optimisation, every EMIF instance other than EMIF1 shares the
1707 * register cache with EMIF1 if the devices connected to this instance
1708 * are the same as those on EMIF1 (indicated by the duplicate flag).
1709 *
1710 * If we do not have an entry corresponding to the frequency given, we
1711 * allocate a new entry and calculate the values.
1712 *
1713 * Upon finding the right reg dump, save it in curr_regs. It can be
1714 * used directly for thermal de-rating and voltage ramping changes.
1715 */
1716static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
1717{
1718	int			i;
1719	struct emif_regs	**regs_cache;
1720	struct emif_regs	*regs = NULL;
1721	struct device		*dev;
1722
1723	dev = emif->dev;
1724	if (emif->curr_regs && emif->curr_regs->freq == freq) {
1725		dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
1726		return emif->curr_regs;
1727	}
1728
1729	if (emif->duplicate)
1730		regs_cache = emif1->regs_cache;
1731	else
1732		regs_cache = emif->regs_cache;
1733
1734	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
1735		if (regs_cache[i]->freq == freq) {
1736			regs = regs_cache[i];
1737			dev_dbg(dev,
1738				"%s: reg dump found in reg cache for %u Hz\n",
1739				__func__, freq);
1740			break;
1741		}
1742	}
1743
1744	/*
1745	 * If we don't have an entry for this frequency in the cache, create
1746	 * one and calculate the values
1747	 */
1748	if (!regs) {
1749		regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
1750		if (!regs)
1751			return NULL;
1752
1753		if (get_emif_reg_values(emif, freq, regs)) {
1754			devm_kfree(emif->dev, regs);
1755			return NULL;
1756		}
1757
1758		/*
1759		 * Now look for an unused entry in the cache and save the
1760		 * newly created struct. If there are no free entries,
1761		 * over-write the last entry
1762		 */
1763		for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
1764			;
1765
1766		if (i >= EMIF_MAX_NUM_FREQUENCIES) {
1767			dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
1768				__func__);
1769			i = EMIF_MAX_NUM_FREQUENCIES - 1;
1770			devm_kfree(emif->dev, regs_cache[i]);
1771		}
1772		regs_cache[i] = regs;
1773	}
1774
1775	return regs;
1776}
1777
1778static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
1779{
1780	dev_dbg(emif->dev, "%s: voltage notification: %d\n", __func__,
1781		volt_state);
1782
1783	if (!emif->curr_regs) {
1784		dev_err(emif->dev,
1785			"%s: volt-notify before registers are ready: %d\n",
1786			__func__, volt_state);
1787		return;
1788	}
1789
1790	setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
1791}
1792
1793/*
1794 * TODO: voltage notify handling should be hooked up to the
1795 * regulator framework as soon as the necessary support is
1796 * available in the mainline kernel. This function is unused
1797 * right now.
1798 */
1799static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
1800{
1801	struct emif_data *emif;
1802
1803	spin_lock_irqsave(&emif_lock, irq_state);
1804
1805	list_for_each_entry(emif, &device_list, node)
1806		do_volt_notify_handling(emif, volt_state);
1807	do_freq_update();
1808
1809	spin_unlock_irqrestore(&emif_lock, irq_state);
1810}
1811
1812static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
1813{
1814	struct emif_regs *regs;
1815
1816	regs = get_regs(emif, new_freq);
1817	if (!regs)
1818		return;
1819
1820	emif->curr_regs = regs;
1821
1822	/*
1823	 * Update the shadow registers:
1824	 * Temperature and voltage-ramp sensitive settings are also configured
1825	 * in terms of DDR cycles. So, we need to update them too when there
1826	 * is a freq change
1827	 */
1828	dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
1829		__func__, new_freq);
1830	setup_registers(emif, regs);
1831	setup_temperature_sensitive_regs(emif, regs);
1832	setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);
1833
1834	/*
1835	 * Part of workaround for errata i728. See do_freq_update()
1836	 * for more details
1837	 */
1838	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
1839		set_lpmode(emif, EMIF_LP_MODE_DISABLE);
1840}
1841
1842/*
1843 * TODO: frequency notify handling should be hooked up to the
1844 * clock framework as soon as the necessary support is
1845 * available in the mainline kernel. This function is unused
1846 * right now.
1847 */
1848static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
1849{
1850	struct emif_data *emif;
1851
1852	/*
1853	 * NOTE: we take the spin-lock here and release it only in the
1854	 * post-notifier. This doesn't look good and Sparse complains
1855	 * about it, but it seems unavoidable. We need to lock a
1856	 * sequence of events that is split between the EMIF driver
1857	 * and the clock framework:
1858	 *
1859	 * 1. EMIF driver updates EMIF timings in shadow registers in the
1860	 *    frequency pre-notify callback from the clock framework
1861	 * 2. clock framework sets up the registers for the new frequency
1862	 * 3. clock framework initiates a hw-sequence that switches to the
1863	 *    new frequency and applies the EMIF timings synchronously
1864	 *
1865	 * All three steps must be performed as one atomic operation
1866	 * vis-a-vis the similar sequence in the EMIF interrupt handler
1867	 * for temperature events. Otherwise, there could be race
1868	 * conditions that result in incorrect EMIF timings for
1869	 * a given frequency
1870	 */
1871	spin_lock_irqsave(&emif_lock, irq_state);
1872
1873	list_for_each_entry(emif, &device_list, node)
1874		do_freq_pre_notify_handling(emif, new_freq);
1875}
1876
1877static void do_freq_post_notify_handling(struct emif_data *emif)
1878{
1879	/*
1880	 * Part of workaround for errata i728. See do_freq_update()
1881	 * for more details
1882	 */
1883	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
1884		set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
1885}
1886
1887/*
1888 * TODO: frequency notify handling should be hooked up to the
1889 * clock framework as soon as the necessary support is
1890 * available in the mainline kernel. This function is unused
1891 * right now.
1892 */
1893static void __attribute__((unused)) freq_post_notify_handling(void)
1894{
1895	struct emif_data *emif;
1896
1897	list_for_each_entry(emif, &device_list, node)
1898		do_freq_post_notify_handling(emif);
1899
1900	/*
1901	 * Lock is done in pre-notify handler. See freq_pre_notify_handling()
1902	 * for more details
1903	 */
1904	spin_unlock_irqrestore(&emif_lock, irq_state);
1905}
1906
1907#if defined(CONFIG_OF)
1908static const struct of_device_id emif_of_match[] = {
1909		{ .compatible = "ti,emif-4d" },
1910		{ .compatible = "ti,emif-4d5" },
1911		{},
1912};
1913MODULE_DEVICE_TABLE(of, emif_of_match);
1914#endif
1915
1916static struct platform_driver emif_driver = {
1917	.remove		= __exit_p(emif_remove),
1918	.shutdown	= emif_shutdown,
1919	.driver = {
1920		.name = "emif",
1921		.of_match_table = of_match_ptr(emif_of_match),
1922	},
1923};
1924
1925module_platform_driver_probe(emif_driver, emif_probe);
1926
1927MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
1928MODULE_LICENSE("GPL");
1929MODULE_ALIAS("platform:emif");
1930MODULE_AUTHOR("Texas Instruments Inc");