1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * ACPI support for Intel Lynxpoint LPSS.
4 *
5 * Copyright (C) 2013, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8 */
9
10#include <linux/acpi.h>
11#include <linux/clkdev.h>
12#include <linux/clk-provider.h>
13#include <linux/dmi.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/mutex.h>
17#include <linux/pci.h>
18#include <linux/platform_device.h>
19#include <linux/platform_data/x86/clk-lpss.h>
20#include <linux/platform_data/x86/pmc_atom.h>
21#include <linux/pm_domain.h>
22#include <linux/pm_runtime.h>
23#include <linux/pwm.h>
24#include <linux/pxa2xx_ssp.h>
25#include <linux/suspend.h>
26#include <linux/delay.h>
27
28#include "internal.h"
29
30#ifdef CONFIG_X86_INTEL_LPSS
31
32#include <asm/cpu_device_id.h>
33#include <asm/intel-family.h>
34#include <asm/iosf_mbi.h>
35
36#define LPSS_ADDR(desc) ((unsigned long)&desc)
37
38#define LPSS_CLK_SIZE 0x04
39#define LPSS_LTR_SIZE 0x18
40
41/* Offsets relative to LPSS_PRIVATE_OFFSET */
42#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
43#define LPSS_RESETS 0x04
44#define LPSS_RESETS_RESET_FUNC BIT(0)
45#define LPSS_RESETS_RESET_APB BIT(1)
46#define LPSS_GENERAL 0x08
47#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
48#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
49#define LPSS_SW_LTR 0x10
50#define LPSS_AUTO_LTR 0x14
51#define LPSS_LTR_SNOOP_REQ BIT(15)
52#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
53#define LPSS_LTR_SNOOP_LAT_1US 0x800
54#define LPSS_LTR_SNOOP_LAT_32US 0xC00
55#define LPSS_LTR_SNOOP_LAT_SHIFT 5
56#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
57#define LPSS_LTR_MAX_VAL 0x3FF
58#define LPSS_TX_INT 0x20
59#define LPSS_TX_INT_MASK BIT(1)
60
61#define LPSS_PRV_REG_COUNT 9
62
63/* LPSS Flags */
64#define LPSS_CLK BIT(0)
65#define LPSS_CLK_GATE BIT(1)
66#define LPSS_CLK_DIVIDER BIT(2)
67#define LPSS_LTR BIT(3)
68#define LPSS_SAVE_CTX BIT(4)
69/*
70 * For some devices the DSDT AML code for another device turns off the device
71 * before our suspend handler runs, causing us to read/save all-ones (0xffffffff)
72 * as ctx register values.
73 * Luckily these devices always use the same ctx register values, so we can
74 * work around this by saving the ctx registers once on activation.
75 */
76#define LPSS_SAVE_CTX_ONCE BIT(5)
77#define LPSS_NO_D3_DELAY BIT(6)
78
79struct lpss_private_data;
80
81struct lpss_device_desc {
82 unsigned int flags;
83 const char *clk_con_id;
84 unsigned int prv_offset;
85 size_t prv_size_override;
86 const struct property_entry *properties;
87 void (*setup)(struct lpss_private_data *pdata);
88 bool resume_from_noirq;
89};
90
91static const struct lpss_device_desc lpss_dma_desc = {
92 .flags = LPSS_CLK,
93};
94
95struct lpss_private_data {
96 struct acpi_device *adev;
97 void __iomem *mmio_base;
98 resource_size_t mmio_size;
99 unsigned int fixed_clk_rate;
100 struct clk *clk;
101 const struct lpss_device_desc *dev_desc;
102 u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
103};
104
105/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
106static u32 pmc_atom_d3_mask = 0xfe000ffe;
107
108/* LPSS run time quirks */
109static unsigned int lpss_quirks;
110
111/*
112 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
113 *
114 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
115 * it can be powered off automatically whenever the last LPSS device goes down.
116 * With the power off, any access to the DMA controller will hang the system.
117 * The behaviour has been observed on some HP laptops based on Intel BayTrail
118 * as well as on the ASUS T100TA Transformer.
119 *
120 * This quirk overrides the power state of the entire LPSS island to keep the
121 * DMA controller powered on whenever at least one other LPSS device is in use.
122 */
123#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0)
124
125/* UART Component Parameter Register */
126#define LPSS_UART_CPR 0xF4
127#define LPSS_UART_CPR_AFCE BIT(4)
128
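/*
 * Set the TX interrupt mask bit in the LPSS private TX_INT register and, if
 * the UART lacks automatic flow control (CPR.AFCE not set), enable the RTS
 * override in the LPSS GENERAL register.
 */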
129static void lpss_uart_setup(struct lpss_private_data *pdata)
130{
131 unsigned int offset;
132 u32 val;
133
134 offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
135 val = readl(pdata->mmio_base + offset);
136 writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
137
138 val = readl(pdata->mmio_base + LPSS_UART_CPR);
139 if (!(val & LPSS_UART_CPR_AFCE)) {
140 offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
141 val = readl(pdata->mmio_base + offset);
142 val |= LPSS_GENERAL_UART_RTS_OVRD;
143 writel(val, pdata->mmio_base + offset);
144 }
145}
146
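/* Take both the device function and its APB bridge out of reset. */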
147static void lpss_deassert_reset(struct lpss_private_data *pdata)
148{
149 unsigned int offset;
150 u32 val;
151
152 offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
153 val = readl(pdata->mmio_base + offset);
154 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
155 writel(val, pdata->mmio_base + offset);
156}
157
158/*
159 * The BYT PWM is used for backlight control by the i915 driver on systems
160 * without the Crystal Cove PMIC.
161 */
162static struct pwm_lookup byt_pwm_lookup[] = {
163 PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
164 "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
165 "pwm-lpss-platform"),
166};
167
168static void byt_pwm_setup(struct lpss_private_data *pdata)
169{
170 /* Only call pwm_add_table for the first PWM controller */
171 if (acpi_dev_uid_match(pdata->adev, 1))
172 pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
173}
174
175#define LPSS_I2C_ENABLE 0x6c
176
177static void byt_i2c_setup(struct lpss_private_data *pdata)
178{
179 acpi_handle handle = pdata->adev->handle;
180 unsigned long long shared_host = 0;
181 acpi_status status;
182 u64 uid;
183
184 /* Expected to always be successful, but better safe than sorry */
185 if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
186 /* Detect I2C bus shared with PUNIT and ignore its d3 status */
187 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
188 if (ACPI_SUCCESS(status) && shared_host)
189 pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
190 }
191
192 lpss_deassert_reset(pdata);
193
194 if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
195 pdata->fixed_clk_rate = 133000000;
196
197 writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
198}
199
200/*
201 * BSW PWM1 is used for backlight control by the i915 driver.
202 * BSW PWM2 is used for backlight control of fixed (etched into the glass)
203 * touch controls on some models. These touch controls have specialized
204 * drivers which know they need the "pwm_soc_lpss_2" con-id.
205 */
206static struct pwm_lookup bsw_pwm_lookup[] = {
207 PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
208 "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
209 "pwm-lpss-platform"),
210 PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL,
211 "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL,
212 "pwm-lpss-platform"),
213};
214
215static void bsw_pwm_setup(struct lpss_private_data *pdata)
216{
217 /* Only call pwm_add_table for the first PWM controller */
218 if (acpi_dev_uid_match(pdata->adev, 1))
219 pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
220}
221
222static const struct property_entry lpt_spi_properties[] = {
223 PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP),
224 { }
225};
226
227static const struct lpss_device_desc lpt_spi_dev_desc = {
228 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
229 | LPSS_SAVE_CTX,
230 .prv_offset = 0x800,
231 .properties = lpt_spi_properties,
232};
233
234static const struct lpss_device_desc lpt_i2c_dev_desc = {
235 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
236 .prv_offset = 0x800,
237};
238
239static struct property_entry uart_properties[] = {
240 PROPERTY_ENTRY_U32("reg-io-width", 4),
241 PROPERTY_ENTRY_U32("reg-shift", 2),
242 PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
243 { },
244};
245
246static const struct lpss_device_desc lpt_uart_dev_desc = {
247 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
248 | LPSS_SAVE_CTX,
249 .clk_con_id = "baudclk",
250 .prv_offset = 0x800,
251 .setup = lpss_uart_setup,
252 .properties = uart_properties,
253};
254
255static const struct lpss_device_desc lpt_sdio_dev_desc = {
256 .flags = LPSS_LTR,
257 .prv_offset = 0x1000,
258 .prv_size_override = 0x1018,
259};
260
261static const struct lpss_device_desc byt_pwm_dev_desc = {
262 .flags = LPSS_SAVE_CTX,
263 .prv_offset = 0x800,
264 .setup = byt_pwm_setup,
265};
266
267static const struct lpss_device_desc bsw_pwm_dev_desc = {
268 .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
269 .prv_offset = 0x800,
270 .setup = bsw_pwm_setup,
271 .resume_from_noirq = true,
272};
273
274static const struct lpss_device_desc bsw_pwm2_dev_desc = {
275 .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
276 .prv_offset = 0x800,
277 .resume_from_noirq = true,
278};
279
280static const struct lpss_device_desc byt_uart_dev_desc = {
281 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
282 .clk_con_id = "baudclk",
283 .prv_offset = 0x800,
284 .setup = lpss_uart_setup,
285 .properties = uart_properties,
286};
287
288static const struct lpss_device_desc bsw_uart_dev_desc = {
289 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
290 | LPSS_NO_D3_DELAY,
291 .clk_con_id = "baudclk",
292 .prv_offset = 0x800,
293 .setup = lpss_uart_setup,
294 .properties = uart_properties,
295};
296
297static const struct property_entry byt_spi_properties[] = {
298 PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP),
299 { }
300};
301
302static const struct lpss_device_desc byt_spi_dev_desc = {
303 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
304 .prv_offset = 0x400,
305 .properties = byt_spi_properties,
306};
307
308static const struct lpss_device_desc byt_sdio_dev_desc = {
309 .flags = LPSS_CLK,
310};
311
312static const struct lpss_device_desc byt_i2c_dev_desc = {
313 .flags = LPSS_CLK | LPSS_SAVE_CTX,
314 .prv_offset = 0x800,
315 .setup = byt_i2c_setup,
316 .resume_from_noirq = true,
317};
318
319static const struct lpss_device_desc bsw_i2c_dev_desc = {
320 .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
321 .prv_offset = 0x800,
322 .setup = byt_i2c_setup,
323 .resume_from_noirq = true,
324};
325
326static const struct property_entry bsw_spi_properties[] = {
327 PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
328 PROPERTY_ENTRY_U32("num-cs", 2),
329 { }
330};
331
332static const struct lpss_device_desc bsw_spi_dev_desc = {
333 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
334 | LPSS_NO_D3_DELAY,
335 .prv_offset = 0x400,
336 .setup = lpss_deassert_reset,
337 .properties = bsw_spi_properties,
338};
339
340static const struct x86_cpu_id lpss_cpu_ids[] = {
341 X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
342 X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
343 {}
344};
345
346#else
347
348#define LPSS_ADDR(desc) (0UL)
349
350#endif /* CONFIG_X86_INTEL_LPSS */
351
352static const struct acpi_device_id acpi_lpss_device_ids[] = {
353 /* Generic LPSS devices */
354 { "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
355
356 /* Lynxpoint LPSS devices */
357 { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) },
358 { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) },
359 { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
360 { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
361 { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
362 { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
363 { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
364
365 /* BayTrail LPSS devices */
366 { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
367 { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
368 { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
369 { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
370 { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
371
372 /* Braswell LPSS devices */
373 { "80862286", LPSS_ADDR(lpss_dma_desc) },
374 { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
375 { "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) },
376 { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
377 { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
378 { "808622C0", LPSS_ADDR(lpss_dma_desc) },
379 { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
380
381 /* Broadwell LPSS devices */
382 { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) },
383 { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) },
384 { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
385 { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
386 { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
387 { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
388 { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
389
390 /* Wildcat Point LPSS devices */
391 { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
392
393 { }
394};
395
396#ifdef CONFIG_X86_INTEL_LPSS
397
398/* LPSS main clock device. */
399static struct platform_device *lpss_clk_dev;
400
401static inline void lpt_register_clock_device(void)
402{
403 lpss_clk_dev = platform_device_register_simple("clk-lpss-atom",
404 PLATFORM_DEVID_NONE,
405 NULL, 0);
406}
407
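/*
 * Register the clock(s) for an LPSS device: either a fixed-rate clock (when
 * the setup hook detected one) or an optional gate plus fractional-divider
 * chain parented to the shared LPSS root clock, followed by a clkdev lookup
 * so that the platform driver can find the clock by its con_id.
 */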
408static int register_device_clock(struct acpi_device *adev,
409 struct lpss_private_data *pdata)
410{
411 const struct lpss_device_desc *dev_desc = pdata->dev_desc;
412 const char *devname = dev_name(&adev->dev);
413 struct clk *clk;
414 struct lpss_clk_data *clk_data;
415 const char *parent, *clk_name;
416 void __iomem *prv_base;
417
418 if (!lpss_clk_dev)
419 lpt_register_clock_device();
420
421 if (IS_ERR(lpss_clk_dev))
422 return PTR_ERR(lpss_clk_dev);
423
424 clk_data = platform_get_drvdata(lpss_clk_dev);
425 if (!clk_data)
426 return -ENODEV;
427 clk = clk_data->clk;
428
429 if (!pdata->mmio_base
430 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
431 return -ENODATA;
432
433 parent = clk_data->name;
434 prv_base = pdata->mmio_base + dev_desc->prv_offset;
435
436 if (pdata->fixed_clk_rate) {
437 clk = clk_register_fixed_rate(NULL, devname, parent, 0,
438 pdata->fixed_clk_rate);
439 goto out;
440 }
441
442 if (dev_desc->flags & LPSS_CLK_GATE) {
443 clk = clk_register_gate(NULL, devname, parent, 0,
444 prv_base, 0, 0, NULL);
445 parent = devname;
446 }
447
448 if (dev_desc->flags & LPSS_CLK_DIVIDER) {
449 /* Prevent division by zero */
450 if (!readl(prv_base))
451 writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
452
453 clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
454 if (!clk_name)
455 return -ENOMEM;
456 clk = clk_register_fractional_divider(NULL, clk_name, parent,
457 0, prv_base, 1, 15, 16, 15,
458 CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
459 NULL);
460 parent = clk_name;
461
462 clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
463 if (!clk_name) {
464 kfree(parent);
465 return -ENOMEM;
466 }
467 clk = clk_register_gate(NULL, clk_name, parent,
468 CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
469 prv_base, 31, 0, NULL);
470 kfree(parent);
471 kfree(clk_name);
472 }
473out:
474 if (IS_ERR(clk))
475 return PTR_ERR(clk);
476
477 pdata->clk = clk;
478 clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
479 return 0;
480}
481
482struct lpss_device_links {
483 const char *supplier_hid;
484 const char *supplier_uid;
485 const char *consumer_hid;
486 const char *consumer_uid;
487 u32 flags;
488 const struct dmi_system_id *dep_missing_ids;
489};
490
491/* Please keep this list sorted alphabetically by vendor and model */
492static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
493 {
494 .matches = {
495 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
496 DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
497 },
498 },
499 {}
500};
501
502/*
503 * The _DEP method is used to identify dependencies, but instead of creating
504 * device links for every handle in _DEP, only links in the following list are
505 * created. That is necessary because, in the general case, _DEP can refer to
506 * devices that might not have drivers, or that are on different buses, or where
507 * the supplier is not enumerated until after the consumer is probed.
508 */
509static const struct lpss_device_links lpss_device_links[] = {
510 /* CHT External sdcard slot controller depends on PMIC I2C ctrl */
511 {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
512 /* CHT iGPU depends on PMIC I2C controller */
513 {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
514 /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
515 {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
516 i2c1_dep_missing_dmi_ids},
517 /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
518 {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
519 /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
520 {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
521};
522
523static bool acpi_lpss_is_supplier(struct acpi_device *adev,
524 const struct lpss_device_links *link)
525{
526 return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
527}
528
529static bool acpi_lpss_is_consumer(struct acpi_device *adev,
530 const struct lpss_device_links *link)
531{
532 return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
533}
534
535struct hid_uid {
536 const char *hid;
537 const char *uid;
538};
539
540static int match_hid_uid(struct device *dev, const void *data)
541{
542 struct acpi_device *adev = ACPI_COMPANION(dev);
543 const struct hid_uid *id = data;
544
545 if (!adev)
546 return 0;
547
548 return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
549}
550
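/*
 * Find a device by its ACPI _HID/_UID, searching the platform bus first and
 * the PCI bus second.
 */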
551static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
552{
553 struct device *dev;
554
555 struct hid_uid data = {
556 .hid = hid,
557 .uid = uid,
558 };
559
560 dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
561 if (dev)
562 return dev;
563
564 return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
565}
566
567static void acpi_lpss_link_consumer(struct device *dev1,
568 const struct lpss_device_links *link)
569{
570 struct device *dev2;
571
572 dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
573 if (!dev2)
574 return;
575
576 if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
577 || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1)))
578 device_link_add(dev2, dev1, link->flags);
579
580 put_device(dev2);
581}
582
583static void acpi_lpss_link_supplier(struct device *dev1,
584 const struct lpss_device_links *link)
585{
586 struct device *dev2;
587
588 dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
589 if (!dev2)
590 return;
591
592 if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
593 || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2)))
594 device_link_add(dev1, dev2, link->flags);
595
596 put_device(dev2);
597}
598
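/*
 * Create device links for every entry in lpss_device_links[] in which this
 * device acts as either the supplier or the consumer.
 */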
599static void acpi_lpss_create_device_links(struct acpi_device *adev,
600 struct platform_device *pdev)
601{
602 int i;
603
604 for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
605 const struct lpss_device_links *link = &lpss_device_links[i];
606
607 if (acpi_lpss_is_supplier(adev, link))
608 acpi_lpss_link_consumer(&pdev->dev, link);
609
610 if (acpi_lpss_is_consumer(adev, link))
611 acpi_lpss_link_supplier(&pdev->dev, link);
612 }
613}
614
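/*
 * Scan handler .attach callback: map the device's MMIO resource, run the
 * optional per-device setup hook, register the device clock if required and
 * create the corresponding platform device.
 */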
615static int acpi_lpss_create_device(struct acpi_device *adev,
616 const struct acpi_device_id *id)
617{
618 const struct lpss_device_desc *dev_desc;
619 struct lpss_private_data *pdata;
620 struct resource_entry *rentry;
621 struct list_head resource_list;
622 struct platform_device *pdev;
623 int ret;
624
625 dev_desc = (const struct lpss_device_desc *)id->driver_data;
626 if (!dev_desc)
627 return -EINVAL;
628
629 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
630 if (!pdata)
631 return -ENOMEM;
632
633 INIT_LIST_HEAD(&resource_list);
634 ret = acpi_dev_get_memory_resources(adev, &resource_list);
635 if (ret < 0)
636 goto err_out;
637
638 rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
639 if (rentry) {
640 if (dev_desc->prv_size_override)
641 pdata->mmio_size = dev_desc->prv_size_override;
642 else
643 pdata->mmio_size = resource_size(rentry->res);
644 pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
645 }
646
647 acpi_dev_free_resource_list(&resource_list);
648
649 if (!pdata->mmio_base) {
650 /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
651 adev->pnp.type.platform_id = 0;
652 goto out_free;
653 }
654
655 pdata->adev = adev;
656 pdata->dev_desc = dev_desc;
657
658 if (dev_desc->setup)
659 dev_desc->setup(pdata);
660
661 if (dev_desc->flags & LPSS_CLK) {
662 ret = register_device_clock(adev, pdata);
663 if (ret)
664 goto out_free;
665 }
666
667 /*
668 * This works around a known issue in ACPI tables where LPSS devices
669 * have _PS0 and _PS3 without _PSC (and no power resources), so
670 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
671 */
672 acpi_device_fix_up_power(adev);
673
674 adev->driver_data = pdata;
675 pdev = acpi_create_platform_device(adev, dev_desc->properties);
676 if (IS_ERR_OR_NULL(pdev)) {
677 adev->driver_data = NULL;
678 ret = PTR_ERR(pdev);
679 goto err_out;
680 }
681
682 acpi_lpss_create_device_links(adev, pdev);
683 return 1;
684
685out_free:
686 /* Skip the device, but continue the namespace scan */
687 ret = 0;
688err_out:
689 kfree(pdata);
690 return ret;
691}
692
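/* Accessors for the LPSS private register space of a device. */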
693static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
694{
695 return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
696}
697
698static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
699 unsigned int reg)
700{
701 writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
702}
703
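/*
 * Read an LPSS private register on behalf of sysfs, returning -EAGAIN if the
 * device is runtime suspended and its registers may not be accessible.
 */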
704static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
705{
706 struct acpi_device *adev = ACPI_COMPANION(dev);
707 struct lpss_private_data *pdata;
708 unsigned long flags;
709 int ret;
710
711 if (WARN_ON(!adev))
712 return -ENODEV;
713
714 spin_lock_irqsave(&dev->power.lock, flags);
715 if (pm_runtime_suspended(dev)) {
716 ret = -EAGAIN;
717 goto out;
718 }
719 pdata = acpi_driver_data(adev);
720 if (WARN_ON(!pdata || !pdata->mmio_base)) {
721 ret = -ENODEV;
722 goto out;
723 }
724 *val = __lpss_reg_read(pdata, reg);
725 ret = 0;
726
727 out:
728 spin_unlock_irqrestore(&dev->power.lock, flags);
729 return ret;
730}
731
732static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
733 char *buf)
734{
735 u32 ltr_value = 0;
736 unsigned int reg;
737 int ret;
738
739 reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
740 ret = lpss_reg_read(dev, reg, &ltr_value);
741 if (ret)
742 return ret;
743
744 return sysfs_emit(buf, "%08x\n", ltr_value);
745}
746
747static ssize_t lpss_ltr_mode_show(struct device *dev,
748 struct device_attribute *attr, char *buf)
749{
750 u32 ltr_mode = 0;
751 char *outstr;
752 int ret;
753
754 ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
755 if (ret)
756 return ret;
757
758 outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
759 return sprintf(buf, "%s\n", outstr);
760}
761
762static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
763static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
764static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
765
766static struct attribute *lpss_attrs[] = {
767 &dev_attr_auto_ltr.attr,
768 &dev_attr_sw_ltr.attr,
769 &dev_attr_ltr_mode.attr,
770 NULL,
771};
772
773static const struct attribute_group lpss_attr_group = {
774 .attrs = lpss_attrs,
775 .name = "lpss_ltr",
776};
777
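/*
 * Program the software LTR (latency tolerance) registers: a negative value
 * switches the device back to automatic LTR mode, otherwise the requested
 * latency is encoded on the 1 us or 32 us scale and software LTR mode is
 * enabled.
 */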
778static void acpi_lpss_set_ltr(struct device *dev, s32 val)
779{
780 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
781 u32 ltr_mode, ltr_val;
782
783 ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
784 if (val < 0) {
785 if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
786 ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
787 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
788 }
789 return;
790 }
791 ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
792 if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
793 ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
794 val = LPSS_LTR_MAX_VAL;
795 } else if (val > LPSS_LTR_MAX_VAL) {
796 ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
797 val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
798 } else {
799 ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
800 }
801 ltr_val |= val;
802 __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
803 if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
804 ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
805 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
806 }
807}
808
809#ifdef CONFIG_PM
810/**
811 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
812 * @dev: LPSS device
813 * @pdata: pointer to the private data of the LPSS device
814 *
815 * Most LPSS devices have private registers which may lose their context when
816 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
817 * prv_reg_ctx array.
818 */
819static void acpi_lpss_save_ctx(struct device *dev,
820 struct lpss_private_data *pdata)
821{
822 unsigned int i;
823
824 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
825 unsigned long offset = i * sizeof(u32);
826
827 pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
828 dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
829 pdata->prv_reg_ctx[i], offset);
830 }
831}
832
833/**
834 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
835 * @dev: LPSS device
836 * @pdata: pointer to the private data of the LPSS device
837 *
838 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
839 */
840static void acpi_lpss_restore_ctx(struct device *dev,
841 struct lpss_private_data *pdata)
842{
843 unsigned int i;
844
845 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
846 unsigned long offset = i * sizeof(u32);
847
848 __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
849 dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
850 pdata->prv_reg_ctx[i], offset);
851 }
852}
853
854static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
855{
856 /*
857 * The following delay is needed or the subsequent write operations may
858 * fail. The LPSS devices are actually PCI devices and the PCI spec
859 * expects a 10 ms delay before the device can be accessed after a D3 to
860 * D0 transition. However, some platforms such as BSW do not need this delay.
861 */
862 unsigned int delay = 10; /* default 10ms delay */
863
864 if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
865 delay = 0;
866
867 msleep(delay);
868}
869
870static int acpi_lpss_activate(struct device *dev)
871{
872 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
873 int ret;
874
875 ret = acpi_dev_resume(dev);
876 if (ret)
877 return ret;
878
879 acpi_lpss_d3_to_d0_delay(pdata);
880
881 /*
882 * This is called only at the ->probe() stage, where the device is either
883 * in a known state defined by the BIOS or, most likely, powered off. Due
884 * to this we have to deassert the reset line to be sure that ->probe()
885 * will recognize the device.
886 */
887 if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
888 lpss_deassert_reset(pdata);
889
890#ifdef CONFIG_PM
891 if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
892 acpi_lpss_save_ctx(dev, pdata);
893#endif
894
895 return 0;
896}
897
898static void acpi_lpss_dismiss(struct device *dev)
899{
900 acpi_dev_suspend(dev, false);
901}
902
903/* IOSF SB for LPSS island */
904#define LPSS_IOSF_UNIT_LPIOEP 0xA0
905#define LPSS_IOSF_UNIT_LPIO1 0xAB
906#define LPSS_IOSF_UNIT_LPIO2 0xAC
907
908#define LPSS_IOSF_PMCSR 0x84
909#define LPSS_PMCSR_D0 0
910#define LPSS_PMCSR_D3hot 3
911#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0)
912
913#define LPSS_IOSF_GPIODEF0 0x154
914#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
915#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
916#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
917#define LPSS_GPIODEF0_DMA_LLP BIT(13)
918
919static DEFINE_MUTEX(lpss_iosf_mutex);
920static bool lpss_iosf_d3_entered = true;
921
922static void lpss_iosf_enter_d3_state(void)
923{
924 u32 value1 = 0;
925 u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
926 u32 value2 = LPSS_PMCSR_D3hot;
927 u32 mask2 = LPSS_PMCSR_Dx_MASK;
928 /*
929 * The PMC provides information about the actual status of the LPSS devices.
930 * Here we read the values related to the LPSS power island, i.e. the LPSS
931 * devices, excluding both LPSS DMA controllers, along with the SCC domain.
932 */
933 u32 func_dis, d3_sts_0, pmc_status;
934 int ret;
935
936 ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
937 if (ret)
938 return;
939
940 mutex_lock(&lpss_iosf_mutex);
941
942 ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
943 if (ret)
944 goto exit;
945
946 /*
947 * Get the status of the entire LPSS power island on a per-device basis.
948 * Shut down both LPSS DMA controllers if and only if all other devices
949 * are already in D3hot.
950 */
951 pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
952 if (pmc_status)
953 goto exit;
954
955 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
956 LPSS_IOSF_PMCSR, value2, mask2);
957
958 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
959 LPSS_IOSF_PMCSR, value2, mask2);
960
961 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
962 LPSS_IOSF_GPIODEF0, value1, mask1);
963
964 lpss_iosf_d3_entered = true;
965
966exit:
967 mutex_unlock(&lpss_iosf_mutex);
968}
969
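/*
 * Power the LPSS DMA controllers back up, undoing lpss_iosf_enter_d3_state()
 * in the reverse order.
 */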
970static void lpss_iosf_exit_d3_state(void)
971{
972 u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
973 LPSS_GPIODEF0_DMA_LLP;
974 u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
975 u32 value2 = LPSS_PMCSR_D0;
976 u32 mask2 = LPSS_PMCSR_Dx_MASK;
977
978 mutex_lock(&lpss_iosf_mutex);
979
980 if (!lpss_iosf_d3_entered)
981 goto exit;
982
983 lpss_iosf_d3_entered = false;
984
985 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
986 LPSS_IOSF_GPIODEF0, value1, mask1);
987
988 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
989 LPSS_IOSF_PMCSR, value2, mask2);
990
991 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
992 LPSS_IOSF_PMCSR, value2, mask2);
993
994exit:
995 mutex_unlock(&lpss_iosf_mutex);
996}
997
998static int acpi_lpss_suspend(struct device *dev, bool wakeup)
999{
1000 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1001 int ret;
1002
1003 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
1004 acpi_lpss_save_ctx(dev, pdata);
1005
1006 ret = acpi_dev_suspend(dev, wakeup);
1007
1008 /*
1009 * This call must be last in the sequence, otherwise the PMC will return a
1010 * wrong status for devices that are about to be powered off. See
1011 * lpss_iosf_enter_d3_state() for further information.
1012 */
1013 if (acpi_target_system_state() == ACPI_STATE_S0 &&
1014 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1015 lpss_iosf_enter_d3_state();
1016
1017 return ret;
1018}
1019
1020static int acpi_lpss_resume(struct device *dev)
1021{
1022 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1023 int ret;
1024
1025 /*
1026 * This call is kept first to be symmetric with the ordering in the
1027 * acpi_lpss_runtime_suspend() path.
1028 */
1029 if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1030 lpss_iosf_exit_d3_state();
1031
1032 ret = acpi_dev_resume(dev);
1033 if (ret)
1034 return ret;
1035
1036 acpi_lpss_d3_to_d0_delay(pdata);
1037
1038 if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
1039 acpi_lpss_restore_ctx(dev, pdata);
1040
1041 return 0;
1042}
1043
1044#ifdef CONFIG_PM_SLEEP
1045static int acpi_lpss_do_suspend_late(struct device *dev)
1046{
1047 int ret;
1048
1049 if (dev_pm_skip_suspend(dev))
1050 return 0;
1051
1052 ret = pm_generic_suspend_late(dev);
1053 return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1054}
1055
1056static int acpi_lpss_suspend_late(struct device *dev)
1057{
1058 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1059
1060 if (pdata->dev_desc->resume_from_noirq)
1061 return 0;
1062
1063 return acpi_lpss_do_suspend_late(dev);
1064}
1065
1066static int acpi_lpss_suspend_noirq(struct device *dev)
1067{
1068 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1069 int ret;
1070
1071 if (pdata->dev_desc->resume_from_noirq) {
1072 /*
1073 * The driver's ->suspend_late callback will be invoked by
1074 * acpi_lpss_do_suspend_late(), with the assumption that the
1075 * driver really wanted to run that code in ->suspend_noirq, but
1076 * it could not run after acpi_dev_suspend() and the driver
1077 * expected the latter to be called in the "late" phase.
1078 */
1079 ret = acpi_lpss_do_suspend_late(dev);
1080 if (ret)
1081 return ret;
1082 }
1083
1084 return acpi_subsys_suspend_noirq(dev);
1085}
1086
1087static int acpi_lpss_do_resume_early(struct device *dev)
1088{
1089 int ret = acpi_lpss_resume(dev);
1090
1091 return ret ? ret : pm_generic_resume_early(dev);
1092}
1093
1094static int acpi_lpss_resume_early(struct device *dev)
1095{
1096 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1097
1098 if (pdata->dev_desc->resume_from_noirq)
1099 return 0;
1100
1101 if (dev_pm_skip_resume(dev))
1102 return 0;
1103
1104 return acpi_lpss_do_resume_early(dev);
1105}
1106
1107static int acpi_lpss_resume_noirq(struct device *dev)
1108{
1109 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1110 int ret;
1111
1112 /* Follow acpi_subsys_resume_noirq(). */
1113 if (dev_pm_skip_resume(dev))
1114 return 0;
1115
1116 ret = pm_generic_resume_noirq(dev);
1117 if (ret)
1118 return ret;
1119
1120 if (!pdata->dev_desc->resume_from_noirq)
1121 return 0;
1122
1123 /*
1124 * The driver's ->resume_early callback will be invoked by
1125 * acpi_lpss_do_resume_early(), with the assumption that the driver
1126 * really wanted to run that code in ->resume_noirq, but it could not
1127 * run before acpi_dev_resume() and the driver expected the latter to be
1128 * called in the "early" phase.
1129 */
1130 return acpi_lpss_do_resume_early(dev);
1131}
1132
1133static int acpi_lpss_do_restore_early(struct device *dev)
1134{
1135 int ret = acpi_lpss_resume(dev);
1136
1137 return ret ? ret : pm_generic_restore_early(dev);
1138}
1139
1140static int acpi_lpss_restore_early(struct device *dev)
1141{
1142 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1143
1144 if (pdata->dev_desc->resume_from_noirq)
1145 return 0;
1146
1147 return acpi_lpss_do_restore_early(dev);
1148}
1149
1150static int acpi_lpss_restore_noirq(struct device *dev)
1151{
1152 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1153 int ret;
1154
1155 ret = pm_generic_restore_noirq(dev);
1156 if (ret)
1157 return ret;
1158
1159 if (!pdata->dev_desc->resume_from_noirq)
1160 return 0;
1161
1162 /* This is analogous to what happens in acpi_lpss_resume_noirq(). */
1163 return acpi_lpss_do_restore_early(dev);
1164}
1165
1166static int acpi_lpss_do_poweroff_late(struct device *dev)
1167{
1168 int ret = pm_generic_poweroff_late(dev);
1169
1170 return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1171}
1172
1173static int acpi_lpss_poweroff_late(struct device *dev)
1174{
1175 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1176
1177 if (dev_pm_skip_suspend(dev))
1178 return 0;
1179
1180 if (pdata->dev_desc->resume_from_noirq)
1181 return 0;
1182
1183 return acpi_lpss_do_poweroff_late(dev);
1184}
1185
1186static int acpi_lpss_poweroff_noirq(struct device *dev)
1187{
1188 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1189
1190 if (dev_pm_skip_suspend(dev))
1191 return 0;
1192
1193 if (pdata->dev_desc->resume_from_noirq) {
1194 /* This is analogous to the acpi_lpss_suspend_noirq() case. */
1195 int ret = acpi_lpss_do_poweroff_late(dev);
1196
1197 if (ret)
1198 return ret;
1199 }
1200
1201 return pm_generic_poweroff_noirq(dev);
1202}
1203#endif /* CONFIG_PM_SLEEP */
1204
1205static int acpi_lpss_runtime_suspend(struct device *dev)
1206{
1207 int ret = pm_generic_runtime_suspend(dev);
1208
1209 return ret ? ret : acpi_lpss_suspend(dev, true);
1210}
1211
1212static int acpi_lpss_runtime_resume(struct device *dev)
1213{
1214 int ret = acpi_lpss_resume(dev);
1215
1216 return ret ? ret : pm_generic_runtime_resume(dev);
1217}
1218#endif /* CONFIG_PM */
1219
1220static struct dev_pm_domain acpi_lpss_pm_domain = {
1221#ifdef CONFIG_PM
1222 .activate = acpi_lpss_activate,
1223 .dismiss = acpi_lpss_dismiss,
1224#endif
1225 .ops = {
1226#ifdef CONFIG_PM
1227#ifdef CONFIG_PM_SLEEP
1228 .prepare = acpi_subsys_prepare,
1229 .complete = acpi_subsys_complete,
1230 .suspend = acpi_subsys_suspend,
1231 .suspend_late = acpi_lpss_suspend_late,
1232 .suspend_noirq = acpi_lpss_suspend_noirq,
1233 .resume_noirq = acpi_lpss_resume_noirq,
1234 .resume_early = acpi_lpss_resume_early,
1235 .freeze = acpi_subsys_freeze,
1236 .poweroff = acpi_subsys_poweroff,
1237 .poweroff_late = acpi_lpss_poweroff_late,
1238 .poweroff_noirq = acpi_lpss_poweroff_noirq,
1239 .restore_noirq = acpi_lpss_restore_noirq,
1240 .restore_early = acpi_lpss_restore_early,
1241#endif
1242 .runtime_suspend = acpi_lpss_runtime_suspend,
1243 .runtime_resume = acpi_lpss_runtime_resume,
1244#endif
1245 },
1246};
1247
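/*
 * Platform bus notifier: set or clear the LPSS PM domain and the LTR sysfs
 * group as LPSS platform devices are added, bound, unbound and removed.
 */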
1248static int acpi_lpss_platform_notify(struct notifier_block *nb,
1249 unsigned long action, void *data)
1250{
1251 struct platform_device *pdev = to_platform_device(data);
1252 struct lpss_private_data *pdata;
1253 struct acpi_device *adev;
1254 const struct acpi_device_id *id;
1255
1256 id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
1257 if (!id || !id->driver_data)
1258 return 0;
1259
1260 adev = ACPI_COMPANION(&pdev->dev);
1261 if (!adev)
1262 return 0;
1263
1264 pdata = acpi_driver_data(adev);
1265 if (!pdata)
1266 return 0;
1267
1268 if (pdata->mmio_base &&
1269 pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
1270 dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
1271 return 0;
1272 }
1273
1274 switch (action) {
1275 case BUS_NOTIFY_BIND_DRIVER:
1276 dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
1277 break;
1278 case BUS_NOTIFY_DRIVER_NOT_BOUND:
1279 case BUS_NOTIFY_UNBOUND_DRIVER:
1280 dev_pm_domain_set(&pdev->dev, NULL);
1281 break;
1282 case BUS_NOTIFY_ADD_DEVICE:
1283 dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
1284 if (pdata->dev_desc->flags & LPSS_LTR)
1285 return sysfs_create_group(&pdev->dev.kobj,
1286 &lpss_attr_group);
1287 break;
1288 case BUS_NOTIFY_DEL_DEVICE:
1289 if (pdata->dev_desc->flags & LPSS_LTR)
1290 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
1291 dev_pm_domain_set(&pdev->dev, NULL);
1292 break;
1293 default:
1294 break;
1295 }
1296
1297 return 0;
1298}
1299
1300static struct notifier_block acpi_lpss_nb = {
1301 .notifier_call = acpi_lpss_platform_notify,
1302};
1303
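/*
 * Hook up the standard latency tolerance callback for devices with LTR
 * support, provided that their private register space is large enough.
 */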
1304static void acpi_lpss_bind(struct device *dev)
1305{
1306 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1307
1308 if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
1309 return;
1310
1311 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
1312 dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
1313 else
1314 dev_err(dev, "MMIO size insufficient to access LTR\n");
1315}
1316
1317static void acpi_lpss_unbind(struct device *dev)
1318{
1319 dev->power.set_latency_tolerance = NULL;
1320}
1321
1322static struct acpi_scan_handler lpss_handler = {
1323 .ids = acpi_lpss_device_ids,
1324 .attach = acpi_lpss_create_device,
1325 .bind = acpi_lpss_bind,
1326 .unbind = acpi_lpss_unbind,
1327};
1328
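/*
 * Set up the LPSS clock support, enable LPSS_QUIRK_ALWAYS_POWER_ON on
 * Silvermont/Airmont CPUs, and register the bus notifier and the ACPI scan
 * handler.
 */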
1329void __init acpi_lpss_init(void)
1330{
1331 const struct x86_cpu_id *id;
1332 int ret;
1333
1334 ret = lpss_atom_clk_init();
1335 if (ret)
1336 return;
1337
1338 id = x86_match_cpu(lpss_cpu_ids);
1339 if (id)
1340 lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;
1341
1342 bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
1343 acpi_scan_add_handler(&lpss_handler);
1344}
1345
1346#else
1347
1348static struct acpi_scan_handler lpss_handler = {
1349 .ids = acpi_lpss_device_ids,
1350};
1351
1352void __init acpi_lpss_init(void)
1353{
1354 acpi_scan_add_handler(&lpss_handler);
1355}
1356
1357#endif /* CONFIG_X86_INTEL_LPSS */