// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Sunrisepoint LPSS core support.
 *
 * Copyright (C) 2015, Intel Corporation
 *
 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Heikki Krogerus <heikki.krogerus@linux.intel.com>
 *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
 */

#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sprintf.h>
#include <linux/types.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/dma/idma64.h>

#include "intel-lpss.h"

struct dentry;

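/*
 * Each LPSS device exposes the host controller registers at offset 0x000,
 * the LPSS private registers at offset 0x200 and, when an integrated DMA
 * engine is present, the iDMA registers at offset 0x800 of its MMIO space.
 */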
#define LPSS_DEV_OFFSET		0x000
#define LPSS_DEV_SIZE		0x200
#define LPSS_PRIV_OFFSET	0x200
#define LPSS_PRIV_SIZE		0x100
#define LPSS_PRIV_REG_COUNT	(LPSS_PRIV_SIZE / 4)
#define LPSS_IDMA64_OFFSET	0x800
#define LPSS_IDMA64_SIZE	0x800

/* Offsets from lpss->priv */
#define LPSS_PRIV_RESETS		0x04
#define LPSS_PRIV_RESETS_IDMA		BIT(2)
#define LPSS_PRIV_RESETS_FUNC		0x3

#define LPSS_PRIV_ACTIVELTR		0x10
#define LPSS_PRIV_IDLELTR		0x14

#define LPSS_PRIV_LTR_REQ		BIT(15)
#define LPSS_PRIV_LTR_SCALE_MASK	GENMASK(11, 10)
#define LPSS_PRIV_LTR_SCALE_1US		(2 << 10)
#define LPSS_PRIV_LTR_SCALE_32US	(3 << 10)
#define LPSS_PRIV_LTR_VALUE_MASK	GENMASK(9, 0)

#define LPSS_PRIV_SSP_REG		0x20
#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN	BIT(0)

#define LPSS_PRIV_REMAP_ADDR		0x40

#define LPSS_PRIV_CAPS			0xfc
#define LPSS_PRIV_CAPS_NO_IDMA		BIT(8)
#define LPSS_PRIV_CAPS_TYPE_MASK	GENMASK(7, 4)
#define LPSS_PRIV_CAPS_TYPE_SHIFT	4

/* This matches the type field in CAPS register */
enum intel_lpss_dev_type {
	LPSS_DEV_I2C = 0,
	LPSS_DEV_UART,
	LPSS_DEV_SPI,
};

struct intel_lpss {
	const struct intel_lpss_platform_info *info;
	enum intel_lpss_dev_type type;
	struct clk *clk;
	struct clk_lookup *clock;
	struct mfd_cell *cell;
	struct device *dev;
	void __iomem *priv;
	u32 priv_ctx[LPSS_PRIV_REG_COUNT];
	int devid;
	u32 caps;
	u32 active_ltr;
	u32 idle_ltr;
	struct dentry *debugfs;
};

static const struct resource intel_lpss_dev_resources[] = {
	DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
	DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
	DEFINE_RES_IRQ(0),
};

static const struct resource intel_lpss_idma64_resources[] = {
	DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
	DEFINE_RES_IRQ(0),
};

/*
 * Cells need to be ordered so that the iDMA is created first. This is
 * because we need to be sure the DMA is available when the host controller
 * driver is probed.
 */
static const struct mfd_cell intel_lpss_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
	.num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
	.resources = intel_lpss_idma64_resources,
};

static const struct mfd_cell intel_lpss_i2c_cell = {
	.name = "i2c_designware",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_uart_cell = {
	.name = "dw-apb-uart",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_spi_cell = {
	.name = "pxa2xx-spi",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static DEFINE_IDA(intel_lpss_devid_ida);
static struct dentry *intel_lpss_debugfs;

static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
{
	lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
	lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
}

static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
{
	struct dentry *dir;

	dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	/* Cache the values into lpss structure */
	intel_lpss_cache_ltr(lpss);

	debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
	debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
	debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);

	lpss->debugfs = dir;
	return 0;
}

static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
{
	debugfs_remove_recursive(lpss->debugfs);
}

static void intel_lpss_ltr_set(struct device *dev, s32 val)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	u32 ltr;

	/*
	 * Program the latency tolerance (LTR) according to what the PM QoS
	 * layer has requested, or disable it in case we were passed a
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~LPSS_PRIV_LTR_REQ;
	} else {
		ltr |= LPSS_PRIV_LTR_REQ;
		ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
		ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;

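		/*
		 * Latencies that fit into the 10-bit value field are encoded
		 * with the 1 us scale; anything larger is converted to
		 * 32 us units (val >> 5) and uses the 32 us scale.
		 */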
		if (val > LPSS_PRIV_LTR_VALUE_MASK)
			ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
		else
			ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
	}

	if (ltr == lpss->active_ltr)
		return;

	writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
	writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);

	/* Cache the values into lpss structure */
	intel_lpss_cache_ltr(lpss);
}

static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
{
	lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
	dev_pm_qos_expose_latency_tolerance(lpss->dev);
}

static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
{
	dev_pm_qos_hide_latency_tolerance(lpss->dev);
	lpss->dev->power.set_latency_tolerance = NULL;
}

static int intel_lpss_assign_devs(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell;
	unsigned int type;

	type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
	type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;

	switch (type) {
	case LPSS_DEV_I2C:
		cell = &intel_lpss_i2c_cell;
		break;
	case LPSS_DEV_UART:
		cell = &intel_lpss_uart_cell;
		break;
	case LPSS_DEV_SPI:
		cell = &intel_lpss_spi_cell;
		break;
	default:
		return -ENODEV;
	}

	lpss->cell = devm_kmemdup(lpss->dev, cell, sizeof(*cell), GFP_KERNEL);
	if (!lpss->cell)
		return -ENOMEM;

	lpss->type = type;

	return 0;
}

static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
{
	return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
}

static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
{
	resource_size_t addr = lpss->info->mem->start;

	lo_hi_writeq(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR);
}

static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;

	/* Bring out the device from reset */
	writel(value, lpss->priv + LPSS_PRIV_RESETS);
}

static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;

	/* Set the device in reset state */
	writel(0, lpss->priv + LPSS_PRIV_RESETS);

	intel_lpss_deassert_reset(lpss);

	intel_lpss_set_remap_addr(lpss);

	if (!intel_lpss_has_idma(lpss))
		return;

	/* Make sure that SPI multiblock DMA transfers are re-enabled */
	if (lpss->type == LPSS_DEV_SPI)
		writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
}

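/* Walk from the leaf clock up to the root, unregistering each clock in turn */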
static void intel_lpss_unregister_clock_tree(struct clk *clk)
{
	struct clk *parent;

	while (clk) {
		parent = clk_get_parent(clk);
		clk_unregister(clk);
		clk = parent;
	}
}

static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
					     const char *devname,
					     struct clk **clk)
{
	char name[32];
	struct clk *tmp = *clk;

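	/*
	 * The first private register holds the whole clock chain for the
	 * host controller: bit 0 gates the clock, bits 1..15 and 16..30
	 * carry the M/N values of the fractional divider, and bit 31
	 * latches the newly written M/N values into the divider.
	 */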
	snprintf(name, sizeof(name), "%s-enable", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
				lpss->priv, 0, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	snprintf(name, sizeof(name), "%s-div", devname);
	tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
					      0, lpss->priv, 1, 15, 16, 15,
					      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
					      NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	snprintf(name, sizeof(name), "%s-update", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
				CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	return 0;
}

static int intel_lpss_register_clock(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell = lpss->cell;
	struct clk *clk;
	char devname[24];
	int ret;

	if (!lpss->info->clk_rate)
		return 0;

	/* Root clock */
	clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 0,
				      lpss->info->clk_rate);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);

	/*
	 * Support the clock divider only if it has some preset value.
	 * Otherwise we assume that the divider is not used.
	 */
	if (lpss->type != LPSS_DEV_I2C) {
		ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
		if (ret)
			goto err_clk_register;
	}

	ret = -ENOMEM;

	/* Clock for the host controller */
	lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
	if (!lpss->clock)
		goto err_clk_register;

	lpss->clk = clk;

	return 0;

err_clk_register:
	intel_lpss_unregister_clock_tree(clk);

	return ret;
}

static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
{
	if (IS_ERR_OR_NULL(lpss->clk))
		return;

	clkdev_drop(lpss->clock);
	intel_lpss_unregister_clock_tree(lpss->clk);
}

int intel_lpss_probe(struct device *dev,
		     const struct intel_lpss_platform_info *info)
{
	struct intel_lpss *lpss;
	int ret;

	if (!info || !info->mem)
		return -EINVAL;

	if (info->irq < 0)
		return info->irq;

	lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
	if (!lpss)
		return -ENOMEM;

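	/*
	 * Map the private registers with an uncached mapping: on some
	 * systems the LPSS BAR can be covered by a write-combining MTRR,
	 * which would break register accesses through a plain ioremap().
	 */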
	lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
				     LPSS_PRIV_SIZE);
	if (!lpss->priv)
		return -ENOMEM;

	lpss->info = info;
	lpss->dev = dev;
	lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);

	dev_set_drvdata(dev, lpss);

	ret = intel_lpss_assign_devs(lpss);
	if (ret)
		return ret;

	lpss->cell->swnode = info->swnode;
	lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;

	intel_lpss_init_dev(lpss);

	lpss->devid = ida_alloc(&intel_lpss_devid_ida, GFP_KERNEL);
	if (lpss->devid < 0)
		return lpss->devid;

	ret = intel_lpss_register_clock(lpss);
	if (ret)
		goto err_clk_register;

	intel_lpss_ltr_expose(lpss);

	ret = intel_lpss_debugfs_add(lpss);
	if (ret)
		dev_warn(dev, "Failed to create debugfs entries\n");

	if (intel_lpss_has_idma(lpss)) {
		ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
				      1, info->mem, info->irq, NULL);
		if (ret)
			dev_warn(dev, "Failed to add %s, fallback to PIO\n",
				 LPSS_IDMA64_DRIVER_NAME);
	}

	ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
			      1, info->mem, info->irq, NULL);
	if (ret)
		goto err_remove_ltr;

	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);

	return 0;

err_remove_ltr:
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);

err_clk_register:
	ida_free(&intel_lpss_devid_ida, lpss->devid);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(intel_lpss_probe, INTEL_LPSS);

void intel_lpss_remove(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);

	mfd_remove_devices(dev);
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);
	ida_free(&intel_lpss_devid_ida, lpss->devid);
}
EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, INTEL_LPSS);

static int resume_lpss_device(struct device *dev, void *data)
{
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		pm_runtime_resume(dev);

	return 0;
}

static int intel_lpss_prepare(struct device *dev)
{
	/*
	 * Resume both child devices before entering system sleep. This
	 * ensures that they are in proper state before they get suspended.
	 */
	device_for_each_child_reverse(dev, NULL, resume_lpss_device);
	return 0;
}

static int intel_lpss_suspend(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	/* Save device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);

	/*
	 * If the device type is not UART, then put the controller into
	 * reset. UART cannot be put into reset since S3/S0ix fail when
	 * no_console_suspend flag is enabled.
	 */
	if (lpss->type != LPSS_DEV_UART)
		writel(0, lpss->priv + LPSS_PRIV_RESETS);

	return 0;
}

static int intel_lpss_resume(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	intel_lpss_deassert_reset(lpss);

	/* Restore device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		writel(lpss->priv_ctx[i], lpss->priv + i * 4);

	return 0;
}

EXPORT_NS_GPL_DEV_PM_OPS(intel_lpss_pm_ops, INTEL_LPSS) = {
	.prepare = pm_sleep_ptr(&intel_lpss_prepare),
	LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
	RUNTIME_PM_OPS(intel_lpss_suspend, intel_lpss_resume, NULL)
};

static int __init intel_lpss_init(void)
{
	intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
	return 0;
}
module_init(intel_lpss_init);

static void __exit intel_lpss_exit(void)
{
	ida_destroy(&intel_lpss_devid_ida);
	debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);

MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS core driver");
MODULE_LICENSE("GPL v2");
/*
 * Ensure the DMA driver is loaded before the host controller device appears,
 * so that the host controller driver can request its DMA channels as early
 * as possible.
 *
 * If the DMA module is not there that's OK as well.
 */
MODULE_SOFTDEP("pre: platform:" LPSS_IDMA64_DRIVER_NAME);