// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "../internal.h"

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE 0x04
#define LPSS_LTR_SIZE 0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
#define LPSS_RESETS 0x04
#define LPSS_RESETS_RESET_FUNC BIT(0)
#define LPSS_RESETS_RESET_APB BIT(1)
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_LTR_SNOOP_REQ BIT(15)
#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US 0x800
#define LPSS_LTR_SNOOP_LAT_32US 0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT 5
#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
#define LPSS_LTR_MAX_VAL 0x3FF
#define LPSS_TX_INT 0x20
#define LPSS_TX_INT_MASK BIT(1)

#define LPSS_PRV_REG_COUNT 9

/* LPSS Flags */
#define LPSS_CLK BIT(0)
#define LPSS_CLK_GATE BIT(1)
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
/*
 * For some devices the DSDT AML code for another device turns off the device
 * before our suspend handler runs, causing us to read/save all-ones
 * (0xffffffff) as the ctx register values.
 * Luckily, these devices always use the same ctx register values, so we can
 * work around this by saving the ctx registers once on activation.
 */
#define LPSS_SAVE_CTX_ONCE BIT(5)
#define LPSS_NO_D3_DELAY BIT(6)

struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	const struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
	bool resume_from_noirq;
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	struct acpi_device *adev;
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override the power state of the LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * When the power is off, any access to the DMA controller hangs the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on the ASUS T100TA Transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever at least one other LPSS device is in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR 0xF4
#define LPSS_UART_CPR_AFCE BIT(4)

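/*
 * Set the TX interrupt mask bit in the LPSS private TX_INT register and, if
 * the UART lacks automatic flow control (CPR.AFCE is not set), enable the
 * RTS override bit in the GENERAL register.
 */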
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

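/* Take both the APB bridge and the device function out of reset. */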
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	/* Only call pwm_add_table for the first PWM controller */
	if (acpi_dev_uid_match(pdata->adev, 1))
		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE 0x6c

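/*
 * Detect an I2C bus shared with the PUNIT (the _SEM method) and ignore its D3
 * status in the PMC mask, deassert the controller resets, treat a non-zero
 * private clock register as a firmware-configured fixed 133 MHz clock, and
 * disable the I2C host so the driver starts from a known state.
 */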
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
	u64 uid;

	/* Expected to always be successful, but better safe than sorry */
	if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
		/* Detect I2C bus shared with PUNIT and ignore its d3 status */
		status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
		if (ACPI_SUCCESS(status) && shared_host)
			pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
	}

	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/*
 * BSW PWM1 is used for backlight control by the i915 driver
 * BSW PWM2 is used for backlight control for fixed (etched into the glass)
 * touch controls on some models. These touch-controls have specialized
 * drivers which know they need the "pwm_soc_lpss_2" con-id.
 */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
	PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL,
			       "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	/* Only call pwm_add_table for the first PWM controller */
	if (acpi_dev_uid_match(pdata->adev, 1))
		pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct property_entry lpt_spi_properties[] = {
	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP),
	{ }
};

static const struct lpss_device_desc lpt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.properties = lpt_spi_properties,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = bsw_pwm_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_pwm2_dev_desc = {
	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct property_entry byt_spi_properties[] = {
	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP),
	{ }
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
	.properties = byt_spi_properties,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct property_entry bsw_spi_properties[] = {
	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
	PROPERTY_ENTRY_U32("num-cs", 2),
	{ }
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
	.properties = bsw_spi_properties,
};

static const struct x86_cpu_id lpss_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, NULL),
	X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL),
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },

	/* Braswell LPSS devices */
	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_spi_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_spi_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpss-atom",
						       PLATFORM_DEVID_NONE,
						       NULL, 0);
}

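/*
 * Build the per-device clock (fixed rate, or gate plus fractional divider,
 * depending on the device flags) on top of the shared LPSS root clock and
 * register a clkdev lookup for it.
 */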
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk;
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	if (IS_ERR(lpss_clk_dev))
		return PTR_ERR(lpss_clk_dev);

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base, 1, 15, 16, 15,
						      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
						      NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;
	const struct dmi_system_id *dep_missing_ids;
};

/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
		},
	},
	{}
};

/*
 * The _DEP method is used to identify dependencies but instead of creating
 * device links for every handle in _DEP, only links in the following list are
 * created. That is necessary because, in the general case, _DEP can refer to
 * devices that might not have drivers, or that are on different buses, or where
 * the supplier is not enumerated until after the consumer is probed.
 */
static const struct lpss_device_links lpss_device_links[] = {
	/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
	/* CHT iGPU depends on PMIC I2C controller */
	{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
	{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
	 i2c1_dep_missing_dmi_ids},
	/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
	{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
	{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};

static bool acpi_lpss_is_supplier(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}

static bool acpi_lpss_is_consumer(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}

struct hid_uid {
	const char *hid;
	const char *uid;
};

static int match_hid_uid(struct device *dev, const void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const struct hid_uid *id = data;

	if (!adev)
		return 0;

	return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}

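/* Find the platform or PCI device whose ACPI companion matches @hid / @uid. */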
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
	struct device *dev;

	struct hid_uid data = {
		.hid = hid,
		.uid = uid,
	};

	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
	if (dev)
		return dev;

	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}

static void acpi_lpss_link_consumer(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1)))
		device_link_add(dev2, dev1, link->flags);

	put_device(dev2);
}

static void acpi_lpss_link_supplier(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2)))
		device_link_add(dev1, dev2, link->flags);

	put_device(dev2);
}

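/*
 * Walk the link table and, wherever @adev is listed as a supplier or a
 * consumer, look up the other end and create the corresponding device link.
 */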
static void acpi_lpss_create_device_links(struct acpi_device *adev,
					  struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
		const struct lpss_device_links *link = &lpss_device_links[i];

		if (acpi_lpss_is_supplier(adev, link))
			acpi_lpss_link_consumer(&pdev->dev, link);

		if (acpi_lpss_is_consumer(adev, link))
			acpi_lpss_link_supplier(&pdev->dev, link);
	}
}

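/*
 * Scan handler .attach() callback: map the device's MMIO resource, run the
 * per-device setup hook, register its clock if needed and create the
 * corresponding platform device.
 */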
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc)
		return -EINVAL;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_memory_resources(adev, &resource_list);
	if (ret < 0)
		goto err_out;

	rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
	if (rentry) {
		if (dev_desc->prv_size_override)
			pdata->mmio_size = dev_desc->prv_size_override;
		else
			pdata->mmio_size = resource_size(rentry->res);
		pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
	}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
		adev->pnp.type.platform_id = 0;
		goto out_free;
	}

	pdata->adev = adev;
	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret)
			goto out_free;
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	acpi_device_fix_up_power(adev);

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (IS_ERR_OR_NULL(pdev)) {
		adev->driver_data = NULL;
		ret = PTR_ERR(pdev);
		goto err_out;
	}

	acpi_lpss_create_device_links(adev, pdev);
	return 1;

out_free:
	/* Skip the device, but continue the namespace scan */
	ret = 0;
err_out:
	kfree(pdata);
	return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

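/*
 * Read an LPSS private register for the sysfs attributes below; only valid
 * while the device is runtime active, hence the check under the power lock.
 */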
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	if (WARN_ON(!adev))
		return -ENODEV;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);
	ret = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static const struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};

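/*
 * ->set_latency_tolerance() callback: a negative value switches the device
 * back to automatic LTR mode, otherwise the value is encoded with a 1 us or
 * 32 us scale into the SW LTR register and SW LTR mode is enabled.
 */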
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10 ms delay before the device can be accessed after a D3
	 * to D0 transition. However, some platforms, such as BSW, do not need
	 * this delay.
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where the device is
	 * either in a known state defined by the BIOS or, most likely, powered
	 * off. Due to this we have to deassert the reset line to be sure that
	 * ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		lpss_deassert_reset(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
		acpi_lpss_save_ctx(dev, pdata);

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
#define LPSS_IOSF_UNIT_LPIO1 0xAB
#define LPSS_IOSF_UNIT_LPIO2 0xAC

#define LPSS_IOSF_PMCSR 0x84
#define LPSS_PMCSR_D0 0
#define LPSS_PMCSR_D3hot 3
#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0 0x154
#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;

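/*
 * Power down the LPSS DMA controllers through IOSF sideband writes, but only
 * once the PMC reports that every other LPSS device is already in D3hot (see
 * pmc_atom_d3_mask above).
 */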
static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	lpss_iosf_d3_entered = true;

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	if (!lpss_iosf_d3_entered)
		goto exit;

	lpss_iosf_d3_entered = false;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for the devices that are about to be powered
	 * off. See lpss_iosf_enter_d3_state() for further information.
	 */
	if (acpi_target_system_state() == ACPI_STATE_S0 &&
	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend().
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		acpi_lpss_restore_ctx(dev, pdata);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_do_suspend_late(struct device *dev)
{
	int ret;

	if (dev_pm_skip_suspend(dev))
		return 0;

	ret = pm_generic_suspend_late(dev);
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_suspend_late(dev);
}

static int acpi_lpss_suspend_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->resume_from_noirq) {
		/*
		 * The driver's ->suspend_late callback will be invoked by
		 * acpi_lpss_do_suspend_late(), with the assumption that the
		 * driver really wanted to run that code in ->suspend_noirq, but
		 * it could not run after acpi_dev_suspend() and the driver
		 * expected the latter to be called in the "late" phase.
		 */
		ret = acpi_lpss_do_suspend_late(dev);
		if (ret)
			return ret;
	}

	return acpi_subsys_suspend_noirq(dev);
}

static int acpi_lpss_do_resume_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_resume_early(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	if (dev_pm_skip_resume(dev))
		return 0;

	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_resume_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/* Follow acpi_subsys_resume_noirq(). */
	if (dev_pm_skip_resume(dev))
		return 0;

	ret = pm_generic_resume_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/*
	 * The driver's ->resume_early callback will be invoked by
	 * acpi_lpss_do_resume_early(), with the assumption that the driver
	 * really wanted to run that code in ->resume_noirq, but it could not
	 * run before acpi_dev_resume() and the driver expected the latter to be
	 * called in the "early" phase.
	 */
	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_poweroff_late(dev);
}

static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);

		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = acpi_subsys_complete,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.suspend_noirq = acpi_lpss_suspend_noirq,
		.resume_noirq = acpi_lpss_resume_noirq,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_poweroff,
		.poweroff_late = acpi_lpss_poweroff_late,
		.poweroff_noirq = acpi_lpss_poweroff_noirq,
		.restore_noirq = acpi_lpss_restore_noirq,
		.restore_early = acpi_lpss_restore_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};

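/*
 * Platform bus notifier: install the LPSS PM domain (and the LTR sysfs group
 * where applicable) when an LPSS platform device is added or bound to a
 * driver, and remove them again on unbind or removal.
 */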
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

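/*
 * Wire up the LTR-based ->set_latency_tolerance() callback for devices that
 * expose the LTR registers in their private MMIO space.
 */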
static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpss_atom_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */