// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2017, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
        {PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

        {0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct mei_device *dev;
        struct mei_txe_hw *hw;
        const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
        int err;

        /* enable the pci device */
        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* enable PCI bus mastering */
        pci_set_master(pdev);
        /* request the pci regions and map the IO device memory for the mei driver */
        err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto end;
        }

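        /* prefer a 36-bit DMA mask; fall back to a 32-bit mask if that is not available */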
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available.\n");
                        goto end;
                }
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_txe_dev_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto end;
        }
        hw = to_txe_hw(dev);
        hw->mem_addr = pcim_iomap_table(pdev);

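        /*
         * Enabling MSI is best effort: the return value is intentionally
         * not checked. If MSI could not be enabled, the shared legacy
         * interrupt path below is used instead, selected via
         * pci_dev_msi_enabled().
         */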
        pci_enable_msi(pdev);

        /* clear spurious interrupts */
        mei_clear_interrupts(dev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                                           NULL,
                                           mei_txe_irq_thread_handler,
                                           IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                                           mei_txe_irq_quick_handler,
                                           mei_txe_irq_thread_handler,
                                           IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto end;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto stop;

        pci_set_drvdata(pdev, dev);

        /*
         * MEI needs to resume from runtime suspend mode
         * in order to perform the link reset flow upon system suspend.
         */
        dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);

        /*
         * TXE maps runtime suspend/resume to its own power gating states,
         * hence we need to go around the native PCI runtime service which
         * would eventually bring the device into the D3cold/hot state.
         * Unlike from its own power gating, the TXE device cannot wake up
         * from D3. To get around the PCI device native runtime pm,
         * TXE uses runtime pm domain handlers which take precedence.
         */
        mei_txe_set_pm_domain(dev);

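        /*
         * The PCI core takes a runtime pm usage reference before probe;
         * drop it here so runtime pm can take effect. It is taken back
         * in mei_txe_remove() via pm_runtime_get_noresume().
         */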
        pm_runtime_put_noidle(&pdev->dev);

        return 0;

stop:
        mei_stop(dev);
release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_shutdown is called from the reboot notifier;
 * it is a simplified version of remove so we go down faster.
 */
static void mei_txe_shutdown(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        dev_dbg(&pdev->dev, "shutdown\n");
        mei_stop(dev);

        mei_txe_unset_pm_domain(dev);

        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev) {
                dev_err(&pdev->dev, "mei: dev == NULL\n");
                return;
        }

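        /* balance the pm_runtime_put_noidle() done at the end of probe */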
        pm_runtime_get_noresume(&pdev->dev);

        mei_stop(dev);

        mei_txe_unset_pm_domain(dev);

        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);

        mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return -ENODEV;

        dev_dbg(&pdev->dev, "suspend\n");

        mei_stop(dev);

        mei_disable_interrupts(dev);

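        /* the irq and MSI are re-acquired in mei_txe_pci_resume() */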
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        mei_clear_interrupts(dev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                                           NULL,
                                           mei_txe_irq_thread_handler,
                                           IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                                           mei_txe_irq_quick_handler,
                                           mei_txe_irq_thread_handler,
                                           IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                        pdev->irq);
                return err;
        }

        err = mei_restart(dev);

        return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
        struct mei_device *dev;

        dev_dbg(device, "rpm: txe: runtime_idle\n");

        dev = dev_get_drvdata(device);
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
                pm_runtime_autosuspend(device);

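        /*
         * Always report -EBUSY so the pm core does not suspend the device
         * on its own; suspend is triggered only by the autosuspend request
         * above, which honors the delay configured in probe.
         */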
        return -EBUSY;
}

static int mei_txe_pm_runtime_suspend(struct device *device)
{
        struct mei_device *dev;
        int ret;

        dev_dbg(device, "rpm: txe: runtime suspend\n");

        dev = dev_get_drvdata(device);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (mei_write_is_idle(dev))
                ret = mei_txe_aliveness_set_sync(dev, 0);
        else
                ret = -EAGAIN;

        /* keep the irq on, we are staying in D0 */

        dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);

        mutex_unlock(&dev->device_lock);

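        /*
         * -EAGAIN only means there is still traffic in flight; any other
         * failure of the aliveness request leaves the device in an unknown
         * state and warrants a reset.
         */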
        if (ret && ret != -EAGAIN)
                schedule_work(&dev->reset_work);

        return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
        struct mei_device *dev;
        int ret;

        dev_dbg(device, "rpm: txe: runtime resume\n");

        dev = dev_get_drvdata(device);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        mei_enable_interrupts(dev);

        ret = mei_txe_aliveness_set_sync(dev, 1);

        mutex_unlock(&dev->device_lock);

        dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);

        if (ret)
                schedule_work(&dev->reset_work);

        return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

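        /*
         * Copy the PCI bus pm ops and override only the runtime callbacks,
         * so system sleep still goes through the PCI core while runtime pm
         * is routed to the TXE power gating handlers.
         */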
        if (pdev->dev.bus && pdev->dev.bus->pm) {
                dev->pg_domain.ops = *pdev->dev.bus->pm;

                dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
                dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
                dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

                dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
        }
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
        /* stop using pm callbacks if any */
        dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
                                mei_txe_pci_resume)
        SET_RUNTIME_PM_OPS(
                mei_txe_pm_runtime_suspend,
                mei_txe_pm_runtime_resume,
                mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_txe_pci_tbl,
        .probe = mei_txe_probe,
        .remove = mei_txe_remove,
        .shutdown = mei_txe_shutdown,
        .driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");