drivers/misc/mei/pci-txe.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
  4 * Intel Management Engine Interface (Intel MEI) Linux driver
  5 */
  6
  7#include <linux/module.h>
  8#include <linux/kernel.h>
  9#include <linux/device.h>
 10#include <linux/errno.h>
 11#include <linux/types.h>
 12#include <linux/pci.h>
 13#include <linux/init.h>
 14#include <linux/sched.h>
 15#include <linux/interrupt.h>
 16#include <linux/workqueue.h>
 17#include <linux/pm_domain.h>
 18#include <linux/pm_runtime.h>
 19
 20#include <linux/mei.h>
 21
 22
 23#include "mei_dev.h"
 24#include "hw-txe.h"
 25
 26static const struct pci_device_id mei_txe_pci_tbl[] = {
 27	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
 28	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */
 29
 30	{0, }
 31};
 32MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
 33
 34#ifdef CONFIG_PM
 35static inline void mei_txe_set_pm_domain(struct mei_device *dev);
 36static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
 37#else
 38static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
 39static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
 40#endif /* CONFIG_PM */
 41
 42/**
 43 * mei_txe_probe - Device Initialization Routine
 44 *
 45 * @pdev: PCI device structure
 46 * @ent: entry in mei_txe_pci_tbl
 47 *
 48 * Return: 0 on success, <0 on failure.
 49 */
 50static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 51{
 52	struct mei_device *dev;
 53	struct mei_txe_hw *hw;
 54	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
 55	int err;
 56
 57	/* enable pci dev */
 58	err = pcim_enable_device(pdev);
 59	if (err) {
 60		dev_err(&pdev->dev, "failed to enable pci device.\n");
 61		goto end;
 62	}
 63	/* set PCI host mastering  */
 64	pci_set_master(pdev);
 65	/* pci request regions and mapping IO device memory for mei driver */
 66	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
 67	if (err) {
 68		dev_err(&pdev->dev, "failed to get pci regions.\n");
 69		goto end;
 70	}
 71
 72	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
 73	if (err) {
 74		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 75		if (err) {
 76			dev_err(&pdev->dev, "No suitable DMA available.\n");
 77			goto end;
 78		}
 79	}
 80
 81	/* allocates and initializes the mei dev structure */
 82	dev = mei_txe_dev_init(pdev);
 83	if (!dev) {
 84		err = -ENOMEM;
 85		goto end;
 86	}
 87	hw = to_txe_hw(dev);
 88	hw->mem_addr = pcim_iomap_table(pdev);
 89
 90	pci_enable_msi(pdev);
 91
 92	/* clear spurious interrupts */
 93	mei_clear_interrupts(dev);
 94
 95	/* request and enable interrupt  */
 96	if (pci_dev_msi_enabled(pdev))
 97		err = request_threaded_irq(pdev->irq,
 98			NULL,
 99			mei_txe_irq_thread_handler,
100			IRQF_ONESHOT, KBUILD_MODNAME, dev);
101	else
102		err = request_threaded_irq(pdev->irq,
103			mei_txe_irq_quick_handler,
104			mei_txe_irq_thread_handler,
105			IRQF_SHARED, KBUILD_MODNAME, dev);
106	if (err) {
107		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
108			pdev->irq);
109		goto end;
110	}
111
112	if (mei_start(dev)) {
113		dev_err(&pdev->dev, "init hw failure.\n");
114		err = -ENODEV;
115		goto release_irq;
116	}
117
118	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
119	pm_runtime_use_autosuspend(&pdev->dev);
120
121	err = mei_register(dev, &pdev->dev);
122	if (err)
123		goto stop;
124
125	pci_set_drvdata(pdev, dev);
126
127	/*
128	 * MEI needs to resume from runtime suspend mode
129	 * in order to perform link reset flow upon system suspend.
130	 */
131	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
132
133	/*
134	 * TXE maps runtime suspend/resume to own power gating states,
135	 * hence we need to go around native PCI runtime service which
136	 * eventually brings the device into D3cold/hot state.
137	 * But the TXE device cannot wake up from D3 unlike from own
138	 * power gating. To get around PCI device native runtime pm,
139	 * TXE uses runtime pm domain handlers which take precedence.
140	 */
141	mei_txe_set_pm_domain(dev);
142
143	pm_runtime_put_noidle(&pdev->dev);
144
145	return 0;
146
147stop:
148	mei_stop(dev);
149release_irq:
150	mei_cancel_work(dev);
151	mei_disable_interrupts(dev);
152	free_irq(pdev->irq, dev);
153end:
154	dev_err(&pdev->dev, "initialization failed.\n");
155	return err;
156}
157
158/**
159 * mei_txe_shutdown - Device Shutdown Routine
160 *
161 * @pdev: PCI device structure
162 *
163 *  mei_txe_shutdown is called from the reboot notifier
164 *  it's a simplified version of remove so we go down
165 *  faster.
166 */
167static void mei_txe_shutdown(struct pci_dev *pdev)
168{
169	struct mei_device *dev;
170
171	dev = pci_get_drvdata(pdev);
172	if (!dev)
173		return;
174
175	dev_dbg(&pdev->dev, "shutdown\n");
176	mei_stop(dev);
177
178	mei_txe_unset_pm_domain(dev);
179
180	mei_disable_interrupts(dev);
181	free_irq(pdev->irq, dev);
182}
183
184/**
185 * mei_txe_remove - Device Removal Routine
186 *
187 * @pdev: PCI device structure
188 *
189 * mei_remove is called by the PCI subsystem to alert the driver
190 * that it should release a PCI device.
191 */
192static void mei_txe_remove(struct pci_dev *pdev)
193{
194	struct mei_device *dev;
195
196	dev = pci_get_drvdata(pdev);
197	if (!dev) {
198		dev_err(&pdev->dev, "mei: dev == NULL\n");
199		return;
200	}
201
202	pm_runtime_get_noresume(&pdev->dev);
203
204	mei_stop(dev);
205
206	mei_txe_unset_pm_domain(dev);
207
208	mei_disable_interrupts(dev);
209	free_irq(pdev->irq, dev);
210
211	mei_deregister(dev);
212}
213
214
215#ifdef CONFIG_PM_SLEEP
216static int mei_txe_pci_suspend(struct device *device)
217{
218	struct pci_dev *pdev = to_pci_dev(device);
219	struct mei_device *dev = pci_get_drvdata(pdev);
220
221	if (!dev)
222		return -ENODEV;
223
224	dev_dbg(&pdev->dev, "suspend\n");
225
226	mei_stop(dev);
227
228	mei_disable_interrupts(dev);
229
230	free_irq(pdev->irq, dev);
231	pci_disable_msi(pdev);
232
233	return 0;
234}
235
236static int mei_txe_pci_resume(struct device *device)
237{
238	struct pci_dev *pdev = to_pci_dev(device);
239	struct mei_device *dev;
240	int err;
241
242	dev = pci_get_drvdata(pdev);
243	if (!dev)
244		return -ENODEV;
245
246	pci_enable_msi(pdev);
247
248	mei_clear_interrupts(dev);
249
250	/* request and enable interrupt */
251	if (pci_dev_msi_enabled(pdev))
252		err = request_threaded_irq(pdev->irq,
253			NULL,
254			mei_txe_irq_thread_handler,
255			IRQF_ONESHOT, KBUILD_MODNAME, dev);
256	else
257		err = request_threaded_irq(pdev->irq,
258			mei_txe_irq_quick_handler,
259			mei_txe_irq_thread_handler,
260			IRQF_SHARED, KBUILD_MODNAME, dev);
261	if (err) {
262		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
263				pdev->irq);
264		return err;
265	}
266
267	err = mei_restart(dev);
268
269	return err;
270}
271#endif /* CONFIG_PM_SLEEP */
272
273#ifdef CONFIG_PM
274static int mei_txe_pm_runtime_idle(struct device *device)
275{
276	struct mei_device *dev;
277
278	dev_dbg(device, "rpm: txe: runtime_idle\n");
279
280	dev = dev_get_drvdata(device);
281	if (!dev)
282		return -ENODEV;
283	if (mei_write_is_idle(dev))
284		pm_runtime_autosuspend(device);
285
286	return -EBUSY;
287}
288static int mei_txe_pm_runtime_suspend(struct device *device)
289{
290	struct mei_device *dev;
291	int ret;
292
293	dev_dbg(device, "rpm: txe: runtime suspend\n");
294
295	dev = dev_get_drvdata(device);
296	if (!dev)
297		return -ENODEV;
298
299	mutex_lock(&dev->device_lock);
300
301	if (mei_write_is_idle(dev))
302		ret = mei_txe_aliveness_set_sync(dev, 0);
303	else
304		ret = -EAGAIN;
305
306	/* keep irq on we are staying in D0 */
307
308	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);
309
310	mutex_unlock(&dev->device_lock);
311
312	if (ret && ret != -EAGAIN)
313		schedule_work(&dev->reset_work);
314
315	return ret;
316}
317
318static int mei_txe_pm_runtime_resume(struct device *device)
319{
320	struct mei_device *dev;
321	int ret;
322
323	dev_dbg(device, "rpm: txe: runtime resume\n");
324
325	dev = dev_get_drvdata(device);
326	if (!dev)
327		return -ENODEV;
328
329	mutex_lock(&dev->device_lock);
330
331	mei_enable_interrupts(dev);
332
333	ret = mei_txe_aliveness_set_sync(dev, 1);
334
335	mutex_unlock(&dev->device_lock);
336
337	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);
338
339	if (ret)
340		schedule_work(&dev->reset_work);
341
342	return ret;
343}
344
345/**
346 * mei_txe_set_pm_domain - fill and set pm domain structure for device
347 *
348 * @dev: mei_device
349 */
350static inline void mei_txe_set_pm_domain(struct mei_device *dev)
351{
352	struct pci_dev *pdev  = to_pci_dev(dev->dev);
353
354	if (pdev->dev.bus && pdev->dev.bus->pm) {
355		dev->pg_domain.ops = *pdev->dev.bus->pm;
356
357		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
358		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
359		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;
360
361		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
362	}
363}
364
365/**
366 * mei_txe_unset_pm_domain - clean pm domain structure for device
367 *
368 * @dev: mei_device
369 */
370static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
371{
372	/* stop using pm callbacks if any */
373	dev_pm_domain_set(dev->dev, NULL);
374}
375
376static const struct dev_pm_ops mei_txe_pm_ops = {
377	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
378				mei_txe_pci_resume)
379	SET_RUNTIME_PM_OPS(
380		mei_txe_pm_runtime_suspend,
381		mei_txe_pm_runtime_resume,
382		mei_txe_pm_runtime_idle)
383};
384
385#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
386#else
387#define MEI_TXE_PM_OPS	NULL
388#endif /* CONFIG_PM */
389
390/*
391 *  PCI driver structure
392 */
393static struct pci_driver mei_txe_driver = {
394	.name = KBUILD_MODNAME,
395	.id_table = mei_txe_pci_tbl,
396	.probe = mei_txe_probe,
397	.remove = mei_txe_remove,
398	.shutdown = mei_txe_shutdown,
399	.driver.pm = MEI_TXE_PM_OPS,
400};
401
402module_pci_driver(mei_txe_driver);
403
404MODULE_AUTHOR("Intel Corporation");
405MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
406MODULE_LICENSE("GPL v2");
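The comment above mei_txe_set_pm_domain() notes that the runtime pm domain handlers "take precedence"; the reason is the PM core's callback lookup order, which consults dev->pm_domain->ops before the type, class or bus pm ops. The sketch below is a minimal illustration of that override pattern with hypothetical names (example_priv, example_runtime_suspend/resume); it is not part of the mei driver, and a real implementation would also override runtime_idle and restore the callbacks with dev_pm_domain_set(dev, NULL) on removal, as both listings do.

/*
 * Minimal sketch (hypothetical names): install a device PM domain that
 * copies the bus dev_pm_ops and replaces only the runtime callbacks, so
 * system sleep handling is untouched while runtime suspend/resume is
 * redirected to driver specific power gating.
 */
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>

struct example_priv {
	struct dev_pm_domain pg_domain;
};

static int example_runtime_suspend(struct device *dev)
{
	/* driver specific power gating would go here */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* driver specific un-gating would go here */
	return 0;
}

static void example_set_pm_domain(struct device *dev, struct example_priv *priv)
{
	if (dev->bus && dev->bus->pm) {
		/* start from the bus ops so system sleep callbacks are kept */
		priv->pg_domain.ops = *dev->bus->pm;

		priv->pg_domain.ops.runtime_suspend = example_runtime_suspend;
		priv->pg_domain.ops.runtime_resume = example_runtime_resume;

		/* dev->pm_domain now takes precedence over dev->bus->pm */
		dev_pm_domain_set(dev, &priv->pg_domain);
	}
}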
drivers/misc/mei/pci-txe.c (v4.10.11)
  1/*
  2 *
  3 * Intel Management Engine Interface (Intel MEI) Linux driver
  4 * Copyright (c) 2013-2014, Intel Corporation.
  5 *
  6 * This program is free software; you can redistribute it and/or modify it
  7 * under the terms and conditions of the GNU General Public License,
  8 * version 2, as published by the Free Software Foundation.
  9 *
 10 * This program is distributed in the hope it will be useful, but WITHOUT
 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 13 * more details.
 14 *
 15 */
 16
 17#include <linux/module.h>
 18#include <linux/kernel.h>
 19#include <linux/device.h>
 20#include <linux/fs.h>
 21#include <linux/errno.h>
 22#include <linux/types.h>
 23#include <linux/pci.h>
 24#include <linux/init.h>
 25#include <linux/sched.h>
 26#include <linux/uuid.h>
 27#include <linux/jiffies.h>
 28#include <linux/interrupt.h>
 29#include <linux/workqueue.h>
 30#include <linux/pm_domain.h>
 31#include <linux/pm_runtime.h>
 32
 33#include <linux/mei.h>
 34
 35
 36#include "mei_dev.h"
 37#include "hw-txe.h"
 38
 39static const struct pci_device_id mei_txe_pci_tbl[] = {
 40	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
 41	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */
 42
 43	{0, }
 44};
 45MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
 46
 47#ifdef CONFIG_PM
 48static inline void mei_txe_set_pm_domain(struct mei_device *dev);
 49static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
 50#else
 51static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
 52static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
 53#endif /* CONFIG_PM */
 54
 55static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
 56{
 57	int i;
 58
 59	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
 60		if (hw->mem_addr[i]) {
 61			pci_iounmap(pdev, hw->mem_addr[i]);
 62			hw->mem_addr[i] = NULL;
 63		}
 64	}
 65}
 66/**
 67 * mei_txe_probe - Device Initialization Routine
 68 *
 69 * @pdev: PCI device structure
 70 * @ent: entry in mei_txe_pci_tbl
 71 *
 72 * Return: 0 on success, <0 on failure.
 73 */
 74static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 75{
 76	struct mei_device *dev;
 77	struct mei_txe_hw *hw;
 78	int err;
 79	int i;
 80
 81	/* enable pci dev */
 82	err = pci_enable_device(pdev);
 83	if (err) {
 84		dev_err(&pdev->dev, "failed to enable pci device.\n");
 85		goto end;
 86	}
 87	/* set PCI host mastering  */
 88	pci_set_master(pdev);
 89	/* pci request regions for mei driver */
 90	err = pci_request_regions(pdev, KBUILD_MODNAME);
 91	if (err) {
 92		dev_err(&pdev->dev, "failed to get pci regions.\n");
 93		goto disable_device;
 94	}
 95
 96	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
 97	if (err) {
 98		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 99		if (err) {
100			dev_err(&pdev->dev, "No suitable DMA available.\n");
101			goto release_regions;
102		}
103	}
104
105	/* allocates and initializes the mei dev structure */
106	dev = mei_txe_dev_init(pdev);
107	if (!dev) {
108		err = -ENOMEM;
109		goto release_regions;
110	}
111	hw = to_txe_hw(dev);
112
113	/* mapping  IO device memory */
114	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
115		hw->mem_addr[i] = pci_iomap(pdev, i, 0);
116		if (!hw->mem_addr[i]) {
117			dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
118			err = -ENOMEM;
119			goto free_device;
120		}
121	}
122
123
124	pci_enable_msi(pdev);
125
126	/* clear spurious interrupts */
127	mei_clear_interrupts(dev);
128
129	/* request and enable interrupt  */
130	if (pci_dev_msi_enabled(pdev))
131		err = request_threaded_irq(pdev->irq,
132			NULL,
133			mei_txe_irq_thread_handler,
134			IRQF_ONESHOT, KBUILD_MODNAME, dev);
135	else
136		err = request_threaded_irq(pdev->irq,
137			mei_txe_irq_quick_handler,
138			mei_txe_irq_thread_handler,
139			IRQF_SHARED, KBUILD_MODNAME, dev);
140	if (err) {
141		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
142			pdev->irq);
143		goto free_device;
144	}
145
146	if (mei_start(dev)) {
147		dev_err(&pdev->dev, "init hw failure.\n");
148		err = -ENODEV;
149		goto release_irq;
150	}
151
152	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
153	pm_runtime_use_autosuspend(&pdev->dev);
154
155	err = mei_register(dev, &pdev->dev);
156	if (err)
157		goto stop;
158
159	pci_set_drvdata(pdev, dev);
160
161	/*
162	 * For HW that is not wake-capable, the runtime pm
163	 * framework can't be used at the pci device level.
164	 * Use domain runtime pm callbacks instead.
165	 */
166	if (!pci_dev_run_wake(pdev))
167		mei_txe_set_pm_domain(dev);
168
169	pm_runtime_put_noidle(&pdev->dev);
170
171	return 0;
172
173stop:
174	mei_stop(dev);
175release_irq:
176
177	mei_cancel_work(dev);
178
179	/* disable interrupts */
180	mei_disable_interrupts(dev);
181
182	free_irq(pdev->irq, dev);
183	pci_disable_msi(pdev);
184
185free_device:
186	mei_txe_pci_iounmap(pdev, hw);
187
188	kfree(dev);
189release_regions:
190	pci_release_regions(pdev);
191disable_device:
192	pci_disable_device(pdev);
193end:
194	dev_err(&pdev->dev, "initialization failed.\n");
195	return err;
196}
197
198/**
199 * mei_txe_remove - Device Removal Routine
200 *
201 * @pdev: PCI device structure
202 *
203 * mei_remove is called by the PCI subsystem to alert the driver
204 * that it should release a PCI device.
205 */
206static void mei_txe_remove(struct pci_dev *pdev)
207{
208	struct mei_device *dev;
209	struct mei_txe_hw *hw;
210
211	dev = pci_get_drvdata(pdev);
212	if (!dev) {
213		dev_err(&pdev->dev, "mei: dev == NULL\n");
214		return;
215	}
216
217	pm_runtime_get_noresume(&pdev->dev);
218
219	hw = to_txe_hw(dev);
220
221	mei_stop(dev);
222
223	if (!pci_dev_run_wake(pdev))
224		mei_txe_unset_pm_domain(dev);
225
226	/* disable interrupts */
227	mei_disable_interrupts(dev);
228	free_irq(pdev->irq, dev);
229	pci_disable_msi(pdev);
230
231	pci_set_drvdata(pdev, NULL);
232
233	mei_txe_pci_iounmap(pdev, hw);
234
235	mei_deregister(dev);
236
237	kfree(dev);
238
239	pci_release_regions(pdev);
240	pci_disable_device(pdev);
241}
242
243
244#ifdef CONFIG_PM_SLEEP
245static int mei_txe_pci_suspend(struct device *device)
246{
247	struct pci_dev *pdev = to_pci_dev(device);
248	struct mei_device *dev = pci_get_drvdata(pdev);
249
250	if (!dev)
251		return -ENODEV;
252
253	dev_dbg(&pdev->dev, "suspend\n");
254
255	mei_stop(dev);
256
257	mei_disable_interrupts(dev);
258
259	free_irq(pdev->irq, dev);
260	pci_disable_msi(pdev);
261
262	return 0;
263}
264
265static int mei_txe_pci_resume(struct device *device)
266{
267	struct pci_dev *pdev = to_pci_dev(device);
268	struct mei_device *dev;
269	int err;
270
271	dev = pci_get_drvdata(pdev);
272	if (!dev)
273		return -ENODEV;
274
275	pci_enable_msi(pdev);
276
277	mei_clear_interrupts(dev);
278
279	/* request and enable interrupt */
280	if (pci_dev_msi_enabled(pdev))
281		err = request_threaded_irq(pdev->irq,
282			NULL,
283			mei_txe_irq_thread_handler,
284			IRQF_ONESHOT, KBUILD_MODNAME, dev);
285	else
286		err = request_threaded_irq(pdev->irq,
287			mei_txe_irq_quick_handler,
288			mei_txe_irq_thread_handler,
289			IRQF_SHARED, KBUILD_MODNAME, dev);
290	if (err) {
291		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
292				pdev->irq);
293		return err;
294	}
295
296	err = mei_restart(dev);
297
298	return err;
299}
300#endif /* CONFIG_PM_SLEEP */
301
302#ifdef CONFIG_PM
303static int mei_txe_pm_runtime_idle(struct device *device)
304{
305	struct pci_dev *pdev = to_pci_dev(device);
306	struct mei_device *dev;
307
308	dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");
309
310	dev = pci_get_drvdata(pdev);
311	if (!dev)
312		return -ENODEV;
313	if (mei_write_is_idle(dev))
314		pm_runtime_autosuspend(device);
315
316	return -EBUSY;
317}
318static int mei_txe_pm_runtime_suspend(struct device *device)
319{
320	struct pci_dev *pdev = to_pci_dev(device);
321	struct mei_device *dev;
322	int ret;
323
324	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");
325
326	dev = pci_get_drvdata(pdev);
327	if (!dev)
328		return -ENODEV;
329
330	mutex_lock(&dev->device_lock);
331
332	if (mei_write_is_idle(dev))
333		ret = mei_txe_aliveness_set_sync(dev, 0);
334	else
335		ret = -EAGAIN;
336
337	/*
338	 * If everything is okay we're about to enter PCI low
339	 * power state (D3) therefore we need to disable the
340	 * interrupts towards host.
341	 * However if device is not wakeable we do not enter
342	 * D-low state and we need to keep the interrupt kicking
343	 */
344	if (!ret && pci_dev_run_wake(pdev))
345		mei_disable_interrupts(dev);
346
347	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
348
349	mutex_unlock(&dev->device_lock);
350
351	if (ret && ret != -EAGAIN)
352		schedule_work(&dev->reset_work);
353
354	return ret;
355}
356
357static int mei_txe_pm_runtime_resume(struct device *device)
358{
359	struct pci_dev *pdev = to_pci_dev(device);
360	struct mei_device *dev;
361	int ret;
362
363	dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");
364
365	dev = pci_get_drvdata(pdev);
366	if (!dev)
367		return -ENODEV;
368
369	mutex_lock(&dev->device_lock);
370
371	mei_enable_interrupts(dev);
372
373	ret = mei_txe_aliveness_set_sync(dev, 1);
374
375	mutex_unlock(&dev->device_lock);
376
377	dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);
378
379	if (ret)
380		schedule_work(&dev->reset_work);
381
382	return ret;
383}
384
385/**
386 * mei_txe_set_pm_domain - fill and set pm domain structure for device
387 *
388 * @dev: mei_device
389 */
390static inline void mei_txe_set_pm_domain(struct mei_device *dev)
391{
392	struct pci_dev *pdev  = to_pci_dev(dev->dev);
393
394	if (pdev->dev.bus && pdev->dev.bus->pm) {
395		dev->pg_domain.ops = *pdev->dev.bus->pm;
396
397		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
398		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
399		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;
400
401		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
402	}
403}
404
405/**
406 * mei_txe_unset_pm_domain - clean pm domain structure for device
407 *
408 * @dev: mei_device
409 */
410static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
411{
412	/* stop using pm callbacks if any */
413	dev_pm_domain_set(dev->dev, NULL);
414}
415
416static const struct dev_pm_ops mei_txe_pm_ops = {
417	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
418				mei_txe_pci_resume)
419	SET_RUNTIME_PM_OPS(
420		mei_txe_pm_runtime_suspend,
421		mei_txe_pm_runtime_resume,
422		mei_txe_pm_runtime_idle)
423};
424
425#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
426#else
427#define MEI_TXE_PM_OPS	NULL
428#endif /* CONFIG_PM */
429
430/*
431 *  PCI driver structure
432 */
433static struct pci_driver mei_txe_driver = {
434	.name = KBUILD_MODNAME,
435	.id_table = mei_txe_pci_tbl,
436	.probe = mei_txe_probe,
437	.remove = mei_txe_remove,
438	.shutdown = mei_txe_remove,
439	.driver.pm = MEI_TXE_PM_OPS,
440};
441
442module_pci_driver(mei_txe_driver);
443
444MODULE_AUTHOR("Intel Corporation");
445MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
446MODULE_LICENSE("GPL v2");
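One difference between the two versions that the listings leave implicit is resource management in the probe error path: the v4.10.11 code has to unwind pci_enable_device(), pci_request_regions() and pci_iomap() by hand (and again in remove), while the v6.8 code uses the managed pcim_* helpers, which register devres actions that the PCI core runs automatically on probe failure or driver unbind. The following is a minimal sketch of that managed pattern, not mei code; the function name, driver name string and single-BAR mask are hypothetical.

/*
 * Minimal sketch (hypothetical names): a probe built on managed PCI
 * helpers. pcim_enable_device(), pcim_iomap_regions() and the table
 * returned by pcim_iomap_table() are all cleaned up by devres, so only
 * resources acquired by hand (IRQ, MSI, driver data) need an explicit
 * error path.
 */
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>

static int example_managed_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	void __iomem * const *bars;
	int err;

	err = pcim_enable_device(pdev);		/* undone automatically */
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(0), "example"); /* BAR 0 only */
	if (err)
		return err;

	bars = pcim_iomap_table(pdev);		/* per-BAR __iomem pointers */
	if (!bars || !bars[0])
		return -ENOMEM;

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pci_set_master(pdev);
	/* device specific setup would follow here */
	return 0;
}

In the v4.10.11 listing the same failure cases instead fall through to the release_regions and disable_device labels, and mei_txe_remove() repeats that cleanup.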