/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/iosf_mbi.h>
#include <asm/pmc_atom.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
#define LPSS_NO_D3_DELAY		BIT(5)

struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	void (*setup)(struct lpss_private_data *pdata);
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * Without power, any access to the DMA controller will hang the system.
 * The behaviour has been observed on some HP laptops based on Intel BayTrail
 * as well as on the Asus T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever at least one other device is in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)

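/*
 * Mask the TX interrupt in the LPSS private registers and, if the UART was
 * synthesized without auto flow control (the CPR AFCE bit is clear), set the
 * RTS override bit in the GENERAL register so RTS can be driven by software.
 */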
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

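/* Take the function and its APB interface out of reset. */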
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

#define LPSS_I2C_ENABLE			0x6c

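/*
 * Deassert reset, assume a 133 MHz fixed clock if the private clock register
 * is already programmed, and disable the I2C host controller so that it comes
 * up in a known state.
 */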
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
	ICPU(0x37),	/* Valleyview, Bay Trail */
	ICPU(0x4c),	/* Braswell, Cherry Trail */
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },

	/* Braswell LPSS devices */
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;
	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

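/*
 * Register the per-device clock on top of the shared LPSS root clock (adding
 * a gate and a fractional divider when the descriptor flags ask for them) and
 * bind it to the device through clkdev.
 */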
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk = ERR_PTR(-ENODEV);
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

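/*
 * Attach handler: map the device MMIO resource, run the descriptor's setup
 * hook, register the device clock if required, fix up the ACPI power state
 * and finally create the platform device.
 */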
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		ret = -ENOMEM;
		goto err_out;
	}

	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	ret = acpi_device_fix_up_power(adev);
	if (ret) {
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev);
	if (!IS_ERR_OR_NULL(pdev))
		return 1;

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

 err_out:
	kfree(pdata);
	return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

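/*
 * Read an LPSS private register on behalf of sysfs; fails with -EAGAIN while
 * the device is runtime suspended and its registers may be inaccessible.
 */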
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};

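/*
 * Program the software LTR register from the latency tolerance value
 * requested through device PM QoS; a negative value switches the device
 * back to automatic LTR mode.
 */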
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices, and the PCI spec
	 * requires a 10 ms delay before a device can be accessed after a D3 to
	 * D0 transition. However, some platforms, such as Braswell (BSW), do
	 * not need this delay.
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

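/*
 * ->activate() callback of the LPSS PM domain: power the device up before its
 * driver's ->probe() runs.
 */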
static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where the device is
	 * either in a known state defined by the BIOS or, most likely,
	 * powered off. Because of this we have to deassert the reset line
	 * to be sure that ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		lpss_deassert_reset(pdata);

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_runtime_suspend(dev);
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_suspend_late(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	return acpi_dev_suspend_late(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume_early(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)

static DEFINE_MUTEX(lpss_iosf_mutex);

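/*
 * Put both LPSS DMA controllers and the LPSS endpoint into D3 through the
 * IOSF sideband, but only when the PMC reports that every other device on
 * the LPSS island is already in D3hot (see LPSS_QUIRK_ALWAYS_POWER_ON).
 */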
static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual state of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all
	 * other devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);
exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_runtime_suspend(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_runtime_suspend(dev);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for devices that are about to be powered
	 * off. See lpss_iosf_enter_d3_state() for further information.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be symmetric with the corresponding
	 * call in acpi_lpss_runtime_suspend().
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = pm_complete_with_resume_check,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_suspend,
		.poweroff_late = acpi_lpss_suspend_late,
		.restore_early = acpi_lpss_resume_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};

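/*
 * Bus notifier: attach the LPSS PM domain to matching platform devices and
 * expose the lpss_ltr sysfs group for devices with LTR support.
 */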
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

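/*
 * Wire up the latency tolerance callback when the device exposes the LTR
 * registers in its private MMIO space.
 */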
static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */