Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Qualcomm External Bus Interface 2 (EBI2) driver
  4 * an older version of the Qualcomm Parallel Interface Controller (QPIC)
  5 *
  6 * Copyright (C) 2016 Linaro Ltd.
  7 *
  8 * Author: Linus Walleij <linus.walleij@linaro.org>
  9 *
 
 
 
 
 10 * See the device tree bindings for this block for more details on the
 11 * hardware.
 12 */
 13
 14#include <linux/module.h>
 15#include <linux/clk.h>
 16#include <linux/err.h>
 17#include <linux/io.h>
 18#include <linux/of.h>
 19#include <linux/of_platform.h>
 20#include <linux/init.h>
 
 21#include <linux/slab.h>
 22#include <linux/platform_device.h>
 23#include <linux/bitops.h>
 24
 25/*
 26 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 27 */
 28#define EBI2_CS0_ENABLE_MASK BIT(0)|BIT(1)
 29#define EBI2_CS1_ENABLE_MASK BIT(2)|BIT(3)
 30#define EBI2_CS2_ENABLE_MASK BIT(4)
 31#define EBI2_CS3_ENABLE_MASK BIT(5)
 32#define EBI2_CS4_ENABLE_MASK BIT(6)|BIT(7)
 33#define EBI2_CS5_ENABLE_MASK BIT(8)|BIT(9)
 34#define EBI2_CSN_MASK GENMASK(9, 0)
 35
 36#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
 37
 38/*
 39 * SLOW CSn CFG
 40 *
 41 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
 42 *             memory continues to drive the data bus after OE is de-asserted.
 43 *             Inserted when reading one CS and switching to another CS or read
 44 *             followed by write on the same CS. Valid values 0 thru 15.
 45 * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
 46 *             every write minimum 1. The data out is driven from the time WE is
 47 *             asserted until CS is asserted. With a hold of 1, the CS stays
 48 *             active for 1 extra cycle etc. Valid values 0 thru 15.
 49 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
 50 *             write to a page or burst memory
 51 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the first
 52 *             read to a page or burst memory
 53 * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1 cycle
 54 *             so 1 thru 16 cycles.
 55 * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1 cycle
 56 *             so 1 thru 16 cycles.
 57 */
 58#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
 59#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
 60#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
 61#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
 62#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
 63#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
 64
 65#define EBI2_XMEM_RECOVERY_SHIFT	28
 66#define EBI2_XMEM_WR_HOLD_SHIFT		24
 67#define EBI2_XMEM_WR_DELTA_SHIFT	16
 68#define EBI2_XMEM_RD_DELTA_SHIFT	8
 69#define EBI2_XMEM_WR_WAIT_SHIFT		4
 70#define EBI2_XMEM_RD_WAIT_SHIFT		0
 71
 72/*
 73 * FAST CSn CFG
 74 * Bits 31-28: ?
 75 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 76 *             transfer. For a single read transfer this will be the time
 77 *             from CS assertion to OE assertion.
 78 * Bits 18-24: ?
 79 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 80 *             assertion, with respect to the cycle where ADV is asserted.
 81 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 82 * Bits 5:     ADDR_HOLD_ENA, The address is held for an extra cycle to meet
 83 *             hold time requirements with ADV assertion.
 84 *
 85 * The manual mentions "write precharge cycles" and "precharge cycles".
 86 * We have not been able to figure out which bit fields these correspond to
 87 * in the hardware, or what valid values exist. The current hypothesis is that
 88 * this is something just used on the FAST chip selects. There is also a "byte
 89 * device enable" flag somewhere for 8bit memories.
 90 */
 91#define EBI2_XMEM_CS0_FAST_CFG 0x0028
 92#define EBI2_XMEM_CS1_FAST_CFG 0x002C
 93#define EBI2_XMEM_CS2_FAST_CFG 0x0030
 94#define EBI2_XMEM_CS3_FAST_CFG 0x0034
 95#define EBI2_XMEM_CS4_FAST_CFG 0x0038
 96#define EBI2_XMEM_CS5_FAST_CFG 0x003C
 97
 98#define EBI2_XMEM_RD_HOLD_SHIFT		24
 99#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
100#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5
101
/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEMC slow CS config
 * @fast_cfg: offset to XMEMC fast CS config
 */
struct cs_data {
	u32 enable_mask;
	u16 slow_cfg;	/* byte offset from the XMEMC register base */
	u16 fast_cfg;	/* byte offset from the XMEMC register base */
};
113
/*
 * Per-chipselect settings, indexed directly by the chipselect number
 * (0..5) taken from a child node's "reg" property; the caller validates
 * the index before looking up an entry.
 */
static const struct cs_data cs_info[] = {
	{
		/* CS0 */
		.enable_mask = EBI2_CS0_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
	},
	{
		/* CS1 */
		.enable_mask = EBI2_CS1_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
	},
	{
		/* CS2 */
		.enable_mask = EBI2_CS2_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
	},
	{
		/* CS3 */
		.enable_mask = EBI2_CS3_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
	},
	{
		/* CS4 */
		.enable_mask = EBI2_CS4_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
	},
	{
		/* CS5 */
		.enable_mask = EBI2_CS5_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
	},
};
152
/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property; a max of 1 denotes a boolean
 * flag whose presence (with a nonzero value) sets a single bit
 * @slowreg: true if this property is in the SLOW CS config register
 * else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 * property
 */
struct ebi2_xmem_prop {
	const char *prop;
	u32 max;
	bool slowreg;
	u16 shift;
};
168
/*
 * All qcom,xmem-* timing properties that may appear in a chipselect
 * child node. Entries with .max == 1 are treated as boolean flags by
 * qcom_ebi2_setup_chipselect(); the other values are capped to .max.
 */
static const struct ebi2_xmem_prop xmem_props[] = {
	{
		.prop = "qcom,xmem-recovery-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-hold-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-address-hold-enable",
		.max = 1, /* boolean prop */
		.slowreg = false,
		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
	},
	{
		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
		.max = 3,
		.slowreg = false,
		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-hold-cycles",
		.max = 15,
		.slowreg = false,
		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
	},
};
225
226static void qcom_ebi2_setup_chipselect(struct device_node *np,
227				       struct device *dev,
228				       void __iomem *ebi2_base,
229				       void __iomem *ebi2_xmem,
230				       u32 csindex)
231{
232	const struct cs_data *csd;
233	u32 slowcfg, fastcfg;
234	u32 val;
235	int ret;
236	int i;
237
238	csd = &cs_info[csindex];
239	val = readl(ebi2_base);
240	val |= csd->enable_mask;
241	writel(val, ebi2_base);
242	dev_dbg(dev, "enabled CS%u\n", csindex);
243
244	/* Next set up the XMEMC */
245	slowcfg = 0;
246	fastcfg = 0;
247
248	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
249		const struct ebi2_xmem_prop *xp = &xmem_props[i];
250
251		/* All are regular u32 values */
252		ret = of_property_read_u32(np, xp->prop, &val);
253		if (ret) {
254			dev_dbg(dev, "could not read %s for CS%d\n",
255				xp->prop, csindex);
256			continue;
257		}
258
259		/* First check boolean props */
260		if (xp->max == 1 && val) {
261			if (xp->slowreg)
262				slowcfg |= BIT(xp->shift);
263			else
264				fastcfg |= BIT(xp->shift);
265			dev_dbg(dev, "set %s flag\n", xp->prop);
266			continue;
267		}
268
269		/* We're dealing with an u32 */
270		if (val > xp->max) {
271			dev_err(dev,
272				"too high value for %s: %u, capped at %u\n",
273				xp->prop, val, xp->max);
274			val = xp->max;
275		}
276		if (xp->slowreg)
277			slowcfg |= (val << xp->shift);
278		else
279			fastcfg |= (val << xp->shift);
280		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
281	}
282
283	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
284		 csindex, slowcfg, fastcfg);
285
286	if (slowcfg)
287		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
288	if (fastcfg)
289		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
290}
291
292static int qcom_ebi2_probe(struct platform_device *pdev)
293{
294	struct device_node *np = pdev->dev.of_node;
295	struct device_node *child;
296	struct device *dev = &pdev->dev;
297	struct resource *res;
298	void __iomem *ebi2_base;
299	void __iomem *ebi2_xmem;
300	struct clk *ebi2xclk;
301	struct clk *ebi2clk;
302	bool have_children = false;
303	u32 val;
304	int ret;
305
306	ebi2xclk = devm_clk_get(dev, "ebi2x");
307	if (IS_ERR(ebi2xclk))
308		return PTR_ERR(ebi2xclk);
309
310	ret = clk_prepare_enable(ebi2xclk);
311	if (ret) {
312		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
313		return ret;
314	}
315
316	ebi2clk = devm_clk_get(dev, "ebi2");
317	if (IS_ERR(ebi2clk)) {
318		ret = PTR_ERR(ebi2clk);
319		goto err_disable_2x_clk;
320	}
321
322	ret = clk_prepare_enable(ebi2clk);
323	if (ret) {
324		dev_err(dev, "could not enable EBI2 clk\n");
325		goto err_disable_2x_clk;
326	}
327
328	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
329	ebi2_base = devm_ioremap_resource(dev, res);
330	if (IS_ERR(ebi2_base)) {
331		ret = PTR_ERR(ebi2_base);
332		goto err_disable_clk;
333	}
334
335	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
336	ebi2_xmem = devm_ioremap_resource(dev, res);
337	if (IS_ERR(ebi2_xmem)) {
338		ret = PTR_ERR(ebi2_xmem);
339		goto err_disable_clk;
340	}
341
342	/* Allegedly this turns the power save mode off */
343	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
344
345	/* Disable all chipselects */
346	val = readl(ebi2_base);
347	val &= ~EBI2_CSN_MASK;
348	writel(val, ebi2_base);
349
350	/* Walk over the child nodes and see what chipselects we use */
351	for_each_available_child_of_node(np, child) {
352		u32 csindex;
353
354		/* Figure out the chipselect */
355		ret = of_property_read_u32(child, "reg", &csindex);
356		if (ret) {
357			of_node_put(child);
358			return ret;
359		}
360
361		if (csindex > 5) {
362			dev_err(dev,
363				"invalid chipselect %u, we only support 0-5\n",
364				csindex);
365			continue;
366		}
367
368		qcom_ebi2_setup_chipselect(child,
369					   dev,
370					   ebi2_base,
371					   ebi2_xmem,
372					   csindex);
373
374		/* We have at least one child */
375		have_children = true;
376	}
377
378	if (have_children)
379		return of_platform_default_populate(np, NULL, dev);
380	return 0;
381
382err_disable_clk:
383	clk_disable_unprepare(ebi2clk);
384err_disable_2x_clk:
385	clk_disable_unprepare(ebi2xclk);
386
387	return ret;
388}
389
/* The EBI2 block appears on the MSM8660 and APQ8060 SoCs */
static const struct of_device_id qcom_ebi2_of_match[] = {
	{ .compatible = "qcom,msm8660-ebi2", },
	{ .compatible = "qcom,apq8060-ebi2", },
	{ }
};
395
/* Platform driver glue, bound via the OF match table above */
static struct platform_driver qcom_ebi2_driver = {
	.probe = qcom_ebi2_probe,
	.driver = {
		.name = "qcom-ebi2",
		.of_match_table = qcom_ebi2_of_match,
	},
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
v4.10.11
 
  1/*
  2 * Qualcomm External Bus Interface 2 (EBI2) driver
  3 * an older version of the Qualcomm Parallel Interface Controller (QPIC)
  4 *
  5 * Copyright (C) 2016 Linaro Ltd.
  6 *
  7 * Author: Linus Walleij <linus.walleij@linaro.org>
  8 *
  9 * This program is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License version 2, as
 11 * published by the Free Software Foundation.
 12 *
 13 * See the device tree bindings for this block for more details on the
 14 * hardware.
 15 */
 16
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
 28
 29/*
 30 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 31 */
 32#define EBI2_CS0_ENABLE_MASK BIT(0)|BIT(1)
 33#define EBI2_CS1_ENABLE_MASK BIT(2)|BIT(3)
 34#define EBI2_CS2_ENABLE_MASK BIT(4)
 35#define EBI2_CS3_ENABLE_MASK BIT(5)
 36#define EBI2_CS4_ENABLE_MASK BIT(6)|BIT(7)
 37#define EBI2_CS5_ENABLE_MASK BIT(8)|BIT(9)
 38#define EBI2_CSN_MASK GENMASK(9, 0)
 39
 40#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
 41
 42/*
 43 * SLOW CSn CFG
 44 *
 45 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
 46 *             memory continues to drive the data bus after OE is de-asserted.
 47 *             Inserted when reading one CS and switching to another CS or read
 48 *             followed by write on the same CS. Valid values 0 thru 15.
 49 * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
 50 *             every write minimum 1. The data out is driven from the time WE is
 51 *             asserted until CS is asserted. With a hold of 1, the CS stays
 52 *             active for 1 extra cycle etc. Valid values 0 thru 15.
 53 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
 54 *             write to a page or burst memory
 55 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the first
 56 *             read to a page or burst memory
 57 * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1 cycle
 58 *             so 1 thru 16 cycles.
 59 * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1 cycle
 60 *             so 1 thru 16 cycles.
 61 */
 62#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
 63#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
 64#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
 65#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
 66#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
 67#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
 68
 69#define EBI2_XMEM_RECOVERY_SHIFT	28
 70#define EBI2_XMEM_WR_HOLD_SHIFT		24
 71#define EBI2_XMEM_WR_DELTA_SHIFT	16
 72#define EBI2_XMEM_RD_DELTA_SHIFT	8
 73#define EBI2_XMEM_WR_WAIT_SHIFT		4
 74#define EBI2_XMEM_RD_WAIT_SHIFT		0
 75
 76/*
 77 * FAST CSn CFG
 78 * Bits 31-28: ?
 79 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 80 *             transfer. For a single read transfer this will be the time
 81 *             from CS assertion to OE assertion.
 82 * Bits 18-24: ?
 83 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 84 *             assertion, with respect to the cycle where ADV is asserted.
 85 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 86 * Bits 5:     ADDR_HOLD_ENA, The address is held for an extra cycle to meet
 87 *             hold time requirements with ADV assertion.
 88 *
 89 * The manual mentions "write precharge cycles" and "precharge cycles".
 90 * We have not been able to figure out which bit fields these correspond to
 91 * in the hardware, or what valid values exist. The current hypothesis is that
 92 * this is something just used on the FAST chip selects. There is also a "byte
 93 * device enable" flag somewhere for 8bit memories.
 94 */
 95#define EBI2_XMEM_CS0_FAST_CFG 0x0028
 96#define EBI2_XMEM_CS1_FAST_CFG 0x002C
 97#define EBI2_XMEM_CS2_FAST_CFG 0x0030
 98#define EBI2_XMEM_CS3_FAST_CFG 0x0034
 99#define EBI2_XMEM_CS4_FAST_CFG 0x0038
100#define EBI2_XMEM_CS5_FAST_CFG 0x003C
101
102#define EBI2_XMEM_RD_HOLD_SHIFT		24
103#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
104#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5
105
/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEMC slow CS config
 * @fast_cfg: offset to XMEMC fast CS config
 */
struct cs_data {
	u32 enable_mask;
	u16 slow_cfg;	/* byte offset from the XMEMC register base */
	u16 fast_cfg;	/* byte offset from the XMEMC register base */
};
117
/*
 * Per-chipselect settings, indexed directly by the chipselect number
 * (0..5) taken from a child node's "reg" property; the caller validates
 * the index before looking up an entry.
 */
static const struct cs_data cs_info[] = {
	{
		/* CS0 */
		.enable_mask = EBI2_CS0_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
	},
	{
		/* CS1 */
		.enable_mask = EBI2_CS1_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
	},
	{
		/* CS2 */
		.enable_mask = EBI2_CS2_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
	},
	{
		/* CS3 */
		.enable_mask = EBI2_CS3_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
	},
	{
		/* CS4 */
		.enable_mask = EBI2_CS4_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
	},
	{
		/* CS5 */
		.enable_mask = EBI2_CS5_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
	},
};
156
/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property; a max of 1 denotes a boolean
 * flag whose presence (with a nonzero value) sets a single bit
 * @slowreg: true if this property is in the SLOW CS config register
 * else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 * property
 */
struct ebi2_xmem_prop {
	const char *prop;
	u32 max;
	bool slowreg;
	u16 shift;
};
172
/*
 * All qcom,xmem-* timing properties that may appear in a chipselect
 * child node. Entries with .max == 1 are treated as boolean flags by
 * qcom_ebi2_setup_chipselect(); the other values are capped to .max.
 */
static const struct ebi2_xmem_prop xmem_props[] = {
	{
		.prop = "qcom,xmem-recovery-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-hold-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-address-hold-enable",
		.max = 1, /* boolean prop */
		.slowreg = false,
		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
	},
	{
		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
		.max = 3,
		.slowreg = false,
		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-hold-cycles",
		.max = 15,
		.slowreg = false,
		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
	},
};
229
230static void qcom_ebi2_setup_chipselect(struct device_node *np,
231				       struct device *dev,
232				       void __iomem *ebi2_base,
233				       void __iomem *ebi2_xmem,
234				       u32 csindex)
235{
236	const struct cs_data *csd;
237	u32 slowcfg, fastcfg;
238	u32 val;
239	int ret;
240	int i;
241
242	csd = &cs_info[csindex];
243	val = readl(ebi2_base);
244	val |= csd->enable_mask;
245	writel(val, ebi2_base);
246	dev_dbg(dev, "enabled CS%u\n", csindex);
247
248	/* Next set up the XMEMC */
249	slowcfg = 0;
250	fastcfg = 0;
251
252	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
253		const struct ebi2_xmem_prop *xp = &xmem_props[i];
254
255		/* All are regular u32 values */
256		ret = of_property_read_u32(np, xp->prop, &val);
257		if (ret) {
258			dev_dbg(dev, "could not read %s for CS%d\n",
259				xp->prop, csindex);
260			continue;
261		}
262
263		/* First check boolean props */
264		if (xp->max == 1 && val) {
265			if (xp->slowreg)
266				slowcfg |= BIT(xp->shift);
267			else
268				fastcfg |= BIT(xp->shift);
269			dev_dbg(dev, "set %s flag\n", xp->prop);
270			continue;
271		}
272
273		/* We're dealing with an u32 */
274		if (val > xp->max) {
275			dev_err(dev,
276				"too high value for %s: %u, capped at %u\n",
277				xp->prop, val, xp->max);
278			val = xp->max;
279		}
280		if (xp->slowreg)
281			slowcfg |= (val << xp->shift);
282		else
283			fastcfg |= (val << xp->shift);
284		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
285	}
286
287	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
288		 csindex, slowcfg, fastcfg);
289
290	if (slowcfg)
291		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
292	if (fastcfg)
293		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
294}
295
296static int qcom_ebi2_probe(struct platform_device *pdev)
297{
298	struct device_node *np = pdev->dev.of_node;
299	struct device_node *child;
300	struct device *dev = &pdev->dev;
301	struct resource *res;
302	void __iomem *ebi2_base;
303	void __iomem *ebi2_xmem;
304	struct clk *ebi2xclk;
305	struct clk *ebi2clk;
306	bool have_children = false;
307	u32 val;
308	int ret;
309
310	ebi2xclk = devm_clk_get(dev, "ebi2x");
311	if (IS_ERR(ebi2xclk))
312		return PTR_ERR(ebi2xclk);
313
314	ret = clk_prepare_enable(ebi2xclk);
315	if (ret) {
316		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
317		return ret;
318	}
319
320	ebi2clk = devm_clk_get(dev, "ebi2");
321	if (IS_ERR(ebi2clk)) {
322		ret = PTR_ERR(ebi2clk);
323		goto err_disable_2x_clk;
324	}
325
326	ret = clk_prepare_enable(ebi2clk);
327	if (ret) {
328		dev_err(dev, "could not enable EBI2 clk\n");
329		goto err_disable_2x_clk;
330	}
331
332	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
333	ebi2_base = devm_ioremap_resource(dev, res);
334	if (IS_ERR(ebi2_base)) {
335		ret = PTR_ERR(ebi2_base);
336		goto err_disable_clk;
337	}
338
339	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
340	ebi2_xmem = devm_ioremap_resource(dev, res);
341	if (IS_ERR(ebi2_xmem)) {
342		ret = PTR_ERR(ebi2_xmem);
343		goto err_disable_clk;
344	}
345
346	/* Allegedly this turns the power save mode off */
347	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
348
349	/* Disable all chipselects */
350	val = readl(ebi2_base);
351	val &= ~EBI2_CSN_MASK;
352	writel(val, ebi2_base);
353
354	/* Walk over the child nodes and see what chipselects we use */
355	for_each_available_child_of_node(np, child) {
356		u32 csindex;
357
358		/* Figure out the chipselect */
359		ret = of_property_read_u32(child, "reg", &csindex);
360		if (ret)
 
361			return ret;
 
362
363		if (csindex > 5) {
364			dev_err(dev,
365				"invalid chipselect %u, we only support 0-5\n",
366				csindex);
367			continue;
368		}
369
370		qcom_ebi2_setup_chipselect(child,
371					   dev,
372					   ebi2_base,
373					   ebi2_xmem,
374					   csindex);
375
376		/* We have at least one child */
377		have_children = true;
378	}
379
380	if (have_children)
381		return of_platform_default_populate(np, NULL, dev);
382	return 0;
383
384err_disable_clk:
385	clk_disable_unprepare(ebi2clk);
386err_disable_2x_clk:
387	clk_disable_unprepare(ebi2xclk);
388
389	return ret;
390}
391
/* The EBI2 block appears on the MSM8660 and APQ8060 SoCs */
static const struct of_device_id qcom_ebi2_of_match[] = {
	{ .compatible = "qcom,msm8660-ebi2", },
	{ .compatible = "qcom,apq8060-ebi2", },
	{ }
};
397
/* Platform driver glue, bound via the OF match table above */
static struct platform_driver qcom_ebi2_driver = {
	.probe = qcom_ebi2_probe,
	.driver = {
		.name = "qcom-ebi2",
		.of_match_table = qcom_ebi2_of_match,
	},
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
MODULE_LICENSE("GPL");