Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2015 Infineon Technologies AG
4 * Copyright (C) 2016 STMicroelectronics SAS
5 *
6 * Authors:
7 * Peter Huewe <peter.huewe@infineon.com>
8 * Christophe Ricard <christophe-h.ricard@st.com>
9 *
10 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
11 *
12 * Device driver for TCG/TCPA TPM (trusted platform module).
13 * Specifications at www.trustedcomputinggroup.org
14 *
15 * This device driver implements the TPM interface as defined in
16 * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
17 * SPI access_.
18 *
19 * It is based on the original tpm_tis device driver from Leendert van
20 * Dorn and Kyleen Hall and Jarko Sakkinnen.
21 */
22
23#include <linux/acpi.h>
24#include <linux/completion.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/slab.h>
30
31#include <linux/of.h>
32#include <linux/spi/spi.h>
33#include <linux/tpm.h>
34
35#include "tpm.h"
36#include "tpm_tis_core.h"
37#include "tpm_tis_spi.h"
38
39#define MAX_SPI_FRAMESIZE 64
40
41/*
42 * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
43 * keep trying to read from the device until MISO goes high indicating the
44 * wait state has ended.
45 *
46 * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
47 */
48static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
49 struct spi_transfer *spi_xfer)
50{
51 struct spi_message m;
52 int ret, i;
53
54 if ((phy->iobuf[3] & 0x01) == 0) {
55 // handle SPI wait states
56 for (i = 0; i < TPM_RETRY; i++) {
57 spi_xfer->len = 1;
58 spi_message_init(&m);
59 spi_message_add_tail(spi_xfer, &m);
60 ret = spi_sync_locked(phy->spi_device, &m);
61 if (ret < 0)
62 return ret;
63 if (phy->iobuf[0] & 0x01)
64 break;
65 }
66
67 if (i == TPM_RETRY)
68 return -ETIMEDOUT;
69 }
70
71 return 0;
72}
73
74/*
75 * Half duplex controller with support for TPM wait state detection like
76 * Tegra QSPI need CMD, ADDR & DATA sent in single message to manage HW flow
77 * control. Each phase sent in different transfer for controller to idenity
78 * phase.
79 */
80static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
81 u16 len, u8 *in, const u8 *out)
82{
83 struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
84 struct spi_transfer spi_xfer[3];
85 struct spi_message m;
86 u8 transfer_len;
87 int ret;
88
89 while (len) {
90 transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
91
92 spi_message_init(&m);
93 phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
94 phy->iobuf[1] = 0xd4;
95 phy->iobuf[2] = addr >> 8;
96 phy->iobuf[3] = addr;
97
98 memset(&spi_xfer, 0, sizeof(spi_xfer));
99
100 spi_xfer[0].tx_buf = phy->iobuf;
101 spi_xfer[0].len = 1;
102 spi_message_add_tail(&spi_xfer[0], &m);
103
104 spi_xfer[1].tx_buf = phy->iobuf + 1;
105 spi_xfer[1].len = 3;
106 spi_message_add_tail(&spi_xfer[1], &m);
107
108 if (out) {
109 spi_xfer[2].tx_buf = &phy->iobuf[4];
110 spi_xfer[2].rx_buf = NULL;
111 memcpy(&phy->iobuf[4], out, transfer_len);
112 out += transfer_len;
113 }
114
115 if (in) {
116 spi_xfer[2].tx_buf = NULL;
117 spi_xfer[2].rx_buf = &phy->iobuf[4];
118 }
119
120 spi_xfer[2].len = transfer_len;
121 spi_message_add_tail(&spi_xfer[2], &m);
122
123 reinit_completion(&phy->ready);
124
125 ret = spi_sync(phy->spi_device, &m);
126 if (ret < 0)
127 return ret;
128
129 if (in) {
130 memcpy(in, &phy->iobuf[4], transfer_len);
131 in += transfer_len;
132 }
133
134 len -= transfer_len;
135 }
136
137 return ret;
138}
139
140static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
141 u16 len, u8 *in, const u8 *out)
142{
143 struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
144 int ret = 0;
145 struct spi_message m;
146 struct spi_transfer spi_xfer;
147 u8 transfer_len;
148
149 spi_bus_lock(phy->spi_device->master);
150
151 while (len) {
152 transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
153
154 phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
155 phy->iobuf[1] = 0xd4;
156 phy->iobuf[2] = addr >> 8;
157 phy->iobuf[3] = addr;
158
159 memset(&spi_xfer, 0, sizeof(spi_xfer));
160 spi_xfer.tx_buf = phy->iobuf;
161 spi_xfer.rx_buf = phy->iobuf;
162 spi_xfer.len = 4;
163 spi_xfer.cs_change = 1;
164
165 spi_message_init(&m);
166 spi_message_add_tail(&spi_xfer, &m);
167 ret = spi_sync_locked(phy->spi_device, &m);
168 if (ret < 0)
169 goto exit;
170
171 /* Flow control transfers are receive only */
172 spi_xfer.tx_buf = NULL;
173 ret = phy->flow_control(phy, &spi_xfer);
174 if (ret < 0)
175 goto exit;
176
177 spi_xfer.cs_change = 0;
178 spi_xfer.len = transfer_len;
179 spi_xfer.delay.value = 5;
180 spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
181
182 if (out) {
183 spi_xfer.tx_buf = phy->iobuf;
184 spi_xfer.rx_buf = NULL;
185 memcpy(phy->iobuf, out, transfer_len);
186 out += transfer_len;
187 }
188
189 spi_message_init(&m);
190 spi_message_add_tail(&spi_xfer, &m);
191 reinit_completion(&phy->ready);
192 ret = spi_sync_locked(phy->spi_device, &m);
193 if (ret < 0)
194 goto exit;
195
196 if (in) {
197 memcpy(in, phy->iobuf, transfer_len);
198 in += transfer_len;
199 }
200
201 len -= transfer_len;
202 }
203
204exit:
205 if (ret < 0) {
206 /* Deactivate chip select */
207 memset(&spi_xfer, 0, sizeof(spi_xfer));
208 spi_message_init(&m);
209 spi_message_add_tail(&spi_xfer, &m);
210 spi_sync_locked(phy->spi_device, &m);
211 }
212
213 spi_bus_unlock(phy->spi_device->master);
214 return ret;
215}
216
217int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
218 u8 *in, const u8 *out)
219{
220 struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
221 struct spi_controller *ctlr = phy->spi_device->controller;
222
223 /*
224 * TPM flow control over SPI requires full duplex support.
225 * Send entire message to a half duplex controller to handle
226 * wait polling in controller.
227 * Set TPM HW flow control flag..
228 */
229 if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
230 return tpm_tis_spi_transfer_half(data, addr, len, in, out);
231 else
232 return tpm_tis_spi_transfer_full(data, addr, len, in, out);
233}
234
/* tpm_tis_phy_ops.read_bytes: read @len bytes from TPM register @addr. */
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
				  u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
	/* io_mode is ignored: the SPI transfer is always byte-wise. */
	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
240
/* tpm_tis_phy_ops.write_bytes: write @len bytes to TPM register @addr. */
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
				   u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
	/* io_mode is ignored: the SPI transfer is always byte-wise. */
	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
246
247int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
248 int irq, const struct tpm_tis_phy_ops *phy_ops)
249{
250 phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
251 if (!phy->iobuf)
252 return -ENOMEM;
253
254 phy->spi_device = spi;
255
256 return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
257}
258
/* PHY callbacks handed to tpm_tis_core for register access over SPI. */
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
	.read_bytes = tpm_tis_spi_read_bytes,
	.write_bytes = tpm_tis_spi_write_bytes,
};
263
264static int tpm_tis_spi_probe(struct spi_device *dev)
265{
266 struct tpm_tis_spi_phy *phy;
267 int irq;
268
269 phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
270 GFP_KERNEL);
271 if (!phy)
272 return -ENOMEM;
273
274 phy->flow_control = tpm_tis_spi_flow_control;
275
276 if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
277 dev->mode |= SPI_TPM_HW_FLOW;
278
279 /* If the SPI device has an IRQ then use that */
280 if (dev->irq > 0)
281 irq = dev->irq;
282 else
283 irq = -1;
284
285 init_completion(&phy->ready);
286 return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
287}
288
289typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
290
291static int tpm_tis_spi_driver_probe(struct spi_device *spi)
292{
293 const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
294 tpm_tis_spi_probe_func probe_func;
295
296 probe_func = of_device_get_match_data(&spi->dev);
297 if (!probe_func) {
298 if (spi_dev_id) {
299 probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
300 if (!probe_func)
301 return -ENODEV;
302 } else
303 probe_func = tpm_tis_spi_probe;
304 }
305
306 return probe_func(spi);
307}
308
/* PM callbacks: tpm_pm_suspend on suspend, tpm_tis_spi_resume on resume. */
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
310
/* Unregister the TPM chip and tear down TIS core state on unbind. */
static void tpm_tis_spi_remove(struct spi_device *dev)
{
	struct tpm_chip *chip = spi_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}
318
/* SPI id table; driver_data holds the device-specific probe function. */
static const struct spi_device_id tpm_tis_spi_id[] = {
	{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
	{ "slb9670", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
	{ "cr50", (unsigned long)cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
328
/* Devicetree match table; .data is the device-specific probe function. */
static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "google,cr50", .data = cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);
337
/* ACPI match table; no per-device data, the generic probe is used. */
static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
	{"SMO0768", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
343
/* SPI driver glue: match tables, PM ops and probe/remove callbacks. */
static struct spi_driver tpm_tis_spi_driver = {
	.driver = {
		.name = "tpm_tis_spi",
		.pm = &tpm_tis_pm,
		.of_match_table = of_match_ptr(of_tis_spi_match),
		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tpm_tis_spi_driver_probe,
	.remove = tpm_tis_spi_remove,
	.id_table = tpm_tis_spi_id,
};
module_spi_driver(tpm_tis_spi_driver);

MODULE_DESCRIPTION("TPM Driver for native SPI access");
MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2015 Infineon Technologies AG
4 * Copyright (C) 2016 STMicroelectronics SAS
5 *
6 * Authors:
7 * Peter Huewe <peter.huewe@infineon.com>
8 * Christophe Ricard <christophe-h.ricard@st.com>
9 *
10 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
11 *
12 * Device driver for TCG/TCPA TPM (trusted platform module).
13 * Specifications at www.trustedcomputinggroup.org
14 *
15 * This device driver implements the TPM interface as defined in
16 * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
17 * SPI access_.
18 *
19 * It is based on the original tpm_tis device driver from Leendert van
20 * Dorn and Kyleen Hall and Jarko Sakkinnen.
21 */
22
23#include <linux/acpi.h>
24#include <linux/completion.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/slab.h>
30
31#include <linux/of.h>
32#include <linux/spi/spi.h>
33#include <linux/tpm.h>
34
35#include "tpm.h"
36#include "tpm_tis_core.h"
37#include "tpm_tis_spi.h"
38
39#define MAX_SPI_FRAMESIZE 64
40#define SPI_HDRSIZE 4
41
42/*
43 * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
44 * keep trying to read from the device until MISO goes high indicating the
45 * wait state has ended.
46 *
47 * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
48 */
49static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
50 struct spi_transfer *spi_xfer)
51{
52 struct spi_message m;
53 int ret, i;
54
55 if ((phy->iobuf[3] & 0x01) == 0) {
56 // handle SPI wait states
57 for (i = 0; i < TPM_RETRY; i++) {
58 spi_xfer->len = 1;
59 spi_message_init(&m);
60 spi_message_add_tail(spi_xfer, &m);
61 ret = spi_sync_locked(phy->spi_device, &m);
62 if (ret < 0)
63 return ret;
64 if (phy->iobuf[0] & 0x01)
65 break;
66 }
67
68 if (i == TPM_RETRY)
69 return -ETIMEDOUT;
70 }
71
72 return 0;
73}
74
75/*
76 * Half duplex controller with support for TPM wait state detection like
77 * Tegra QSPI need CMD, ADDR & DATA sent in single message to manage HW flow
78 * control. Each phase sent in different transfer for controller to idenity
79 * phase.
80 */
81static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
82 u16 len, u8 *in, const u8 *out)
83{
84 struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
85 struct spi_transfer spi_xfer[3];
86 struct spi_message m;
87 u8 transfer_len;
88 int ret;
89
90 while (len) {
91 transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
92
93 spi_message_init(&m);
94 phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
95 phy->iobuf[1] = 0xd4;
96 phy->iobuf[2] = addr >> 8;
97 phy->iobuf[3] = addr;
98
99 memset(&spi_xfer, 0, sizeof(spi_xfer));
100
101 spi_xfer[0].tx_buf = phy->iobuf;
102 spi_xfer[0].len = 1;
103 spi_message_add_tail(&spi_xfer[0], &m);
104
105 spi_xfer[1].tx_buf = phy->iobuf + 1;
106 spi_xfer[1].len = 3;
107 spi_message_add_tail(&spi_xfer[1], &m);
108
109 if (out) {
110 spi_xfer[2].tx_buf = &phy->iobuf[4];
111 spi_xfer[2].rx_buf = NULL;
112 memcpy(&phy->iobuf[4], out, transfer_len);
113 out += transfer_len;
114 }
115
116 if (in) {
117 spi_xfer[2].tx_buf = NULL;
118 spi_xfer[2].rx_buf = &phy->iobuf[4];
119 }
120
121 spi_xfer[2].len = transfer_len;
122 spi_message_add_tail(&spi_xfer[2], &m);
123
124 reinit_completion(&phy->ready);
125
126 ret = spi_sync(phy->spi_device, &m);
127 if (ret < 0)
128 return ret;
129
130 if (in) {
131 memcpy(in, &phy->iobuf[4], transfer_len);
132 in += transfer_len;
133 }
134
135 len -= transfer_len;
136 }
137
138 return ret;
139}
140
/*
 * Full duplex transfer: send the 4-byte header, poll flow control, then
 * clock up to MAX_SPI_FRAMESIZE data bytes per iteration, all under a
 * bus lock with chip select held between phases.
 */
static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	int ret = 0;
	struct spi_message m;
	struct spi_transfer spi_xfer;
	u8 transfer_len;

	/* Header, flow-control polls and data must not be interleaved. */
	spi_bus_lock(phy->spi_device->controller);

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		/* 4-byte header: read flag + (size - 1), 0xd4, 16-bit address. */
		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_xfer.tx_buf = phy->iobuf;
		spi_xfer.rx_buf = phy->iobuf;
		spi_xfer.len = 4;
		/* Keep CS asserted: flow control and data still follow. */
		spi_xfer.cs_change = 1;

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		/* Flow control transfers are receive only */
		spi_xfer.tx_buf = NULL;
		ret = phy->flow_control(phy, &spi_xfer);
		if (ret < 0)
			goto exit;

		/* Data phase: release CS at the end of this transfer. */
		spi_xfer.cs_change = 0;
		spi_xfer.len = transfer_len;
		spi_xfer.delay.value = 5;
		spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;

		if (out) {
			spi_xfer.tx_buf = phy->iobuf;
			spi_xfer.rx_buf = NULL;
			memcpy(phy->iobuf, out, transfer_len);
			out += transfer_len;
		}

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		reinit_completion(&phy->ready);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		if (in) {
			memcpy(in, phy->iobuf, transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

exit:
	if (ret < 0) {
		/* Deactivate chip select */
		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		spi_sync_locked(phy->spi_device, &m);
	}

	spi_bus_unlock(phy->spi_device->controller);
	return ret;
}
217
218int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
219 u8 *in, const u8 *out)
220{
221 struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
222 struct spi_controller *ctlr = phy->spi_device->controller;
223
224 /*
225 * TPM flow control over SPI requires full duplex support.
226 * Send entire message to a half duplex controller to handle
227 * wait polling in controller.
228 * Set TPM HW flow control flag..
229 */
230 if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
231 return tpm_tis_spi_transfer_half(data, addr, len, in, out);
232 else
233 return tpm_tis_spi_transfer_full(data, addr, len, in, out);
234}
235
/* tpm_tis_phy_ops.read_bytes: read @len bytes from TPM register @addr. */
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
				  u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
	/* io_mode is ignored: the SPI transfer is always byte-wise. */
	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
241
/* tpm_tis_phy_ops.write_bytes: write @len bytes to TPM register @addr. */
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
				   u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
	/* io_mode is ignored: the SPI transfer is always byte-wise. */
	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
247
/*
 * Allocate the shared I/O buffer and register this PHY with the TIS core.
 */
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
		     int irq, const struct tpm_tis_phy_ops *phy_ops)
{
	/*
	 * Header + payload: the half duplex path places the payload at
	 * &iobuf[4], after the SPI_HDRSIZE-byte header.
	 */
	phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
	if (!phy->iobuf)
		return -ENOMEM;

	phy->spi_device = spi;

	return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
}
259
/* PHY callbacks handed to tpm_tis_core for register access over SPI. */
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
	.read_bytes = tpm_tis_spi_read_bytes,
	.write_bytes = tpm_tis_spi_write_bytes,
};
264
265static int tpm_tis_spi_probe(struct spi_device *dev)
266{
267 struct tpm_tis_spi_phy *phy;
268 int irq;
269
270 phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
271 GFP_KERNEL);
272 if (!phy)
273 return -ENOMEM;
274
275 phy->flow_control = tpm_tis_spi_flow_control;
276
277 if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
278 dev->mode |= SPI_TPM_HW_FLOW;
279
280 /* If the SPI device has an IRQ then use that */
281 if (dev->irq > 0)
282 irq = dev->irq;
283 else
284 irq = -1;
285
286 init_completion(&phy->ready);
287 return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
288}
289
290typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
291
292static int tpm_tis_spi_driver_probe(struct spi_device *spi)
293{
294 const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
295 tpm_tis_spi_probe_func probe_func;
296
297 probe_func = of_device_get_match_data(&spi->dev);
298 if (!probe_func) {
299 if (spi_dev_id) {
300 probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
301 if (!probe_func)
302 return -ENODEV;
303 } else
304 probe_func = tpm_tis_spi_probe;
305 }
306
307 return probe_func(spi);
308}
309
/* PM callbacks: tpm_pm_suspend on suspend, tpm_tis_spi_resume on resume. */
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
311
/* Unregister the TPM chip and tear down TIS core state on unbind. */
static void tpm_tis_spi_remove(struct spi_device *dev)
{
	struct tpm_chip *chip = spi_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}
319
/* SPI id table; driver_data holds the device-specific probe function. */
static const struct spi_device_id tpm_tis_spi_id[] = {
	{ "attpm20p", (unsigned long)tpm_tis_spi_probe },
	{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
	{ "slb9670", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
	{ "cr50", (unsigned long)cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
330
/* Devicetree match table; .data is the device-specific probe function. */
static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
	{ .compatible = "atmel,attpm20p", .data = tpm_tis_spi_probe },
	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "google,cr50", .data = cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);
340
/* ACPI match table; no per-device data, the generic probe is used. */
static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
	{"SMO0768", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
346
/* SPI driver glue: match tables, PM ops and probe/remove callbacks. */
static struct spi_driver tpm_tis_spi_driver = {
	.driver = {
		.name = "tpm_tis_spi",
		.pm = &tpm_tis_pm,
		.of_match_table = of_match_ptr(of_tis_spi_match),
		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tpm_tis_spi_driver_probe,
	.remove = tpm_tis_spi_remove,
	.id_table = tpm_tis_spi_id,
};
module_spi_driver(tpm_tis_spi_driver);

MODULE_DESCRIPTION("TPM Driver for native SPI access");
MODULE_LICENSE("GPL");