// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

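/*
 * PM domain setup/teardown helpers: implemented below when CONFIG_PM is
 * enabled, otherwise reduced to empty stubs.
 */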
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

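/**
 * mei_me_read_fws - read firmware status register from PCI config space
 *
 * @dev: mei device
 * @where: register offset within the PCI configuration space
 * @val: pointer to the returned register value
 *
 * Return: 0 on success, an error code otherwise
 */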
static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	return pci_read_config_dword(pdev, where, val);
}

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if the ME interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
			       const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg;
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto end;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(&pdev->dev, cfg, false);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_me_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev)[0];
	hw->read_fws = mei_me_read_fws;

	pci_enable_msi(pdev);

	hw->irq = pdev->irq;

	/* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
				   mei_me_irq_quick_handler,
				   mei_me_irq_thread_handler,
				   irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to resume from runtime suspend mode
	 * in order to perform the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/*
	 * ME maps runtime suspend/resume to D0i states,
	 * hence we need to bypass the native PCI runtime service, which
	 * would eventually bring the device into D3cold/hot state,
	 * but the mei device cannot wake up from D3, unlike from D0i3.
	 * To get around the native PCI runtime PM,
	 * ME uses runtime PM domain handlers which take precedence
	 * over the driver's PM handlers.
	 */
	mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev)) {
		pm_runtime_put_noidle(&pdev->dev);
		if (hw->d0i3_supported)
			pm_runtime_allow(&pdev->dev);
	}

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it is a simplified version of remove so the device
 * is brought down faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
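/* Runtime-resume the device before system suspend so the link reset flow can run. */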
static int mei_me_pci_prepare(struct device *device)
{
	pm_runtime_resume(device);
	return 0;
}

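/* System suspend: stop the device, release the interrupt line and disable MSI. */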
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

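/* System resume: re-enable MSI, re-request the interrupt and restart the device. */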
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
				   mei_me_irq_quick_handler,
				   mei_me_irq_thread_handler,
				   irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}

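/* Try to put the device back into runtime suspend once system resume has completed. */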
static void mei_me_pci_complete(struct device *device)
{
	pm_runtime_suspend(device);
}
#else /* CONFIG_PM_SLEEP */

#define mei_me_pci_prepare NULL
#define mei_me_pci_complete NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
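/* Runtime idle: kick off autosuspend only when the write queues are idle. */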
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev;

	dev_dbg(device, "rpm: me: runtime_idle\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

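/* Runtime suspend: enter power gating when the device is idle, otherwise bail out with -EAGAIN. */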
static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

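/* Runtime resume: exit power gating; schedule a device reset if that fails. */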
static int mei_me_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
	.prepare = mei_me_pci_prepare,
	.complete = mei_me_pci_complete,
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_shutdown,
	.driver.pm = MEI_ME_PM_OPS,
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");