1// SPDX-License-Identifier: GPL-2.0
2// PCI1xxxx SPI driver
3// Copyright (C) 2022 Microchip Technology Inc.
4// Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
5// Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
6
7
8#include <linux/module.h>
9#include <linux/pci.h>
10#include <linux/spi/spi.h>
11#include <linux/delay.h>
12
#define DRV_NAME "spi-pci1xxxx"

/* Base clock feeding the SPI clock divider, in Hz */
#define SYS_FREQ_DEFAULT (62500000)

/*
 * Supported SCK frequency boundaries (Hz); the breakpoints below select
 * the 3-bit SPEED divider value in pci1xxxx_get_clock_div().
 */
#define PCI1XXXX_SPI_MAX_CLOCK_HZ (30000000)
#define PCI1XXXX_SPI_CLK_20MHZ (20000000)
#define PCI1XXXX_SPI_CLK_15MHZ (15000000)
#define PCI1XXXX_SPI_CLK_12MHZ (12000000)
#define PCI1XXXX_SPI_CLK_10MHZ (10000000)
#define PCI1XXXX_SPI_MIN_CLOCK_HZ (2000000)

/* Size in bytes of the HW command/response buffers */
#define PCI1XXXX_SPI_BUFFER_SIZE (320)

/* Bit fields of the SPI_MST_CTL register */
#define SPI_MST_CTL_DEVSEL_MASK (GENMASK(27, 25))
#define SPI_MST_CTL_CMD_LEN_MASK (GENMASK(16, 8))
#define SPI_MST_CTL_SPEED_MASK (GENMASK(7, 5))
#define SPI_MSI_VECTOR_SEL_MASK (GENMASK(4, 4))

#define SPI_MST_CTL_FORCE_CE (BIT(4))
#define SPI_MST_CTL_MODE_SEL (BIT(2))
#define SPI_MST_CTL_GO (BIT(0))

/* Stride between the register banks of the two HW instances */
#define SPI_MST1_ADDR_BASE (0x800)

/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */

#define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
#define SPI_MST_RSP_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x200)
#define SPI_MST_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x400)
#define SPI_MST_EVENT_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x420)
#define SPI_MST_EVENT_MASK_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x424)
#define SPI_MST_PAD_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x460)
#define SPIALERT_MST_DB_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x464)
#define SPIALERT_MST_VAL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x468)
#define SPI_PCI_CTRL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x480)

#define PCI1XXXX_IRQ_FLAGS (IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
/* Maximum payload per HW transfer chunk; larger xfers are split */
#define SPI_MAX_DATA_LEN 320

/* Per-chunk completion timeout used in transfer_one() */
#define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))

/* Transfer-done event/mask bit in the EVENT registers */
#define SPI_INTR BIT(8)
#define SPI_FORCE_CE BIT(4)

#define SPI_CHIP_SEL_COUNT 7
#define VENDOR_ID_MCHP 0x1055

/* Event-mask register values written at suspend/resume */
#define SPI_SUSPEND_CONFIG 0x101
#define SPI_RESUME_CONFIG 0x303
62
/* Per-HW-instance state; one of these backs each registered SPI host */
struct pci1xxxx_spi_internal {
	u8 hw_inst;			/* HW instance id (0 or 1), scales register offsets */
	bool spi_xfer_in_progress;	/* polled by suspend to drain in-flight transfers */
	int irq;			/* MSI vector assigned to this instance */
	struct completion spi_xfer_done; /* completed from the ISR on SPI_INTR */
	struct spi_master *spi_host;
	struct pci1xxxx_spi *parent;	/* back-pointer to the shared PCI device state */
	struct {
		/* register fields saved at suspend, rewritten at resume */
		unsigned int dev_sel : 3;	 /* SPI_MST_CTL DEVSEL field (bits 27:25) */
		unsigned int msi_vector_sel : 1; /* SPI_PCI_CTRL bit 4 */
	} prev_val;
};
75
/* Shared per-PCI-function state; spi_int[] holds the per-instance data */
struct pci1xxxx_spi {
	struct pci_dev *dev;
	u8 total_hw_instances;		/* number of entries in spi_int[] */
	void __iomem *reg_base;		/* BAR 0 mapping, base of all register banks */
	struct pci1xxxx_spi_internal *spi_int[];	/* flexible array, sized in probe */
};
82
/*
 * driver_data encoding (decoded in probe): low nibble = number of HW
 * instances to register, high nibble = first HW instance id.  So 0x02
 * means "two instances starting at 0", 0x11 means "one instance,
 * second controller only", 0x01 means "first controller only".
 * Subsystem device id selects the variant.
 */
static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
108
109static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
110{
111 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
112 struct pci1xxxx_spi *par = p->parent;
113 u32 regval;
114
115 /* Set the DEV_SEL bits of the SPI_MST_CTL_REG */
116 regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
117 if (enable) {
118 regval &= ~SPI_MST_CTL_DEVSEL_MASK;
119 regval |= (spi->chip_select << 25);
120 writel(regval,
121 par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
122 } else {
123 regval &= ~(spi->chip_select << 25);
124 writel(regval,
125 par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
126
127 }
128}
129
130static u8 pci1xxxx_get_clock_div(u32 hz)
131{
132 u8 val = 0;
133
134 if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
135 val = 2;
136 else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
137 val = 3;
138 else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
139 val = 4;
140 else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
141 val = 5;
142 else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
143 val = 6;
144 else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
145 val = 7;
146 else
147 val = 2;
148
149 return val;
150}
151
152static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
153 struct spi_device *spi, struct spi_transfer *xfer)
154{
155 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
156 int mode, len, loop_iter, transfer_len;
157 struct pci1xxxx_spi *par = p->parent;
158 unsigned long bytes_transfered;
159 unsigned long bytes_recvd;
160 unsigned long loop_count;
161 u8 *rx_buf, result;
162 const u8 *tx_buf;
163 u32 regval;
164 u8 clkdiv;
165
166 p->spi_xfer_in_progress = true;
167 mode = spi->mode;
168 clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
169 tx_buf = xfer->tx_buf;
170 rx_buf = xfer->rx_buf;
171 transfer_len = xfer->len;
172 regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
173 writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
174
175 if (tx_buf) {
176 bytes_transfered = 0;
177 bytes_recvd = 0;
178 loop_count = transfer_len / SPI_MAX_DATA_LEN;
179 if (transfer_len % SPI_MAX_DATA_LEN != 0)
180 loop_count += 1;
181
182 for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
183 len = SPI_MAX_DATA_LEN;
184 if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
185 (loop_iter == loop_count - 1))
186 len = transfer_len % SPI_MAX_DATA_LEN;
187
188 reinit_completion(&p->spi_xfer_done);
189 memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
190 &tx_buf[bytes_transfered], len);
191 bytes_transfered += len;
192 regval = readl(par->reg_base +
193 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
194 regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
195 SPI_MST_CTL_SPEED_MASK);
196
197 if (mode == SPI_MODE_3)
198 regval |= SPI_MST_CTL_MODE_SEL;
199 else
200 regval &= ~SPI_MST_CTL_MODE_SEL;
201
202 regval |= ((clkdiv << 5) | SPI_FORCE_CE | (len << 8));
203 regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
204 writel(regval, par->reg_base +
205 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
206 regval = readl(par->reg_base +
207 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
208 regval |= SPI_MST_CTL_GO;
209 writel(regval, par->reg_base +
210 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
211
212 /* Wait for DMA_TERM interrupt */
213 result = wait_for_completion_timeout(&p->spi_xfer_done,
214 PCI1XXXX_SPI_TIMEOUT);
215 if (!result)
216 return -ETIMEDOUT;
217
218 if (rx_buf) {
219 memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
220 SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
221 bytes_recvd += len;
222 }
223 }
224 }
225
226 regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
227 regval &= ~SPI_FORCE_CE;
228 writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
229 p->spi_xfer_in_progress = false;
230
231 return 0;
232}
233
234static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
235{
236 struct pci1xxxx_spi_internal *p = dev;
237 irqreturn_t spi_int_fired = IRQ_NONE;
238 u32 regval;
239
240 /* Clear the SPI GO_BIT Interrupt */
241 regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
242 if (regval & SPI_INTR) {
243 /* Clear xfer_done */
244 complete(&p->spi_xfer_done);
245 spi_int_fired = IRQ_HANDLED;
246 }
247
248 writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
249
250 return spi_int_fired;
251}
252
253static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
254{
255 u8 hw_inst_cnt, iter, start, only_sec_inst;
256 struct pci1xxxx_spi_internal *spi_sub_ptr;
257 struct device *dev = &pdev->dev;
258 struct pci1xxxx_spi *spi_bus;
259 struct spi_master *spi_host;
260 u32 regval;
261 int ret;
262
263 hw_inst_cnt = ent->driver_data & 0x0f;
264 start = (ent->driver_data & 0xf0) >> 4;
265 if (start == 1)
266 only_sec_inst = 1;
267 else
268 only_sec_inst = 0;
269
270 spi_bus = devm_kzalloc(&pdev->dev,
271 struct_size(spi_bus, spi_int, hw_inst_cnt),
272 GFP_KERNEL);
273 if (!spi_bus)
274 return -ENOMEM;
275
276 spi_bus->dev = pdev;
277 spi_bus->total_hw_instances = hw_inst_cnt;
278 pci_set_master(pdev);
279
280 for (iter = 0; iter < hw_inst_cnt; iter++) {
281 spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
282 sizeof(struct pci1xxxx_spi_internal),
283 GFP_KERNEL);
284 spi_sub_ptr = spi_bus->spi_int[iter];
285 spi_sub_ptr->spi_host = devm_spi_alloc_master(dev, sizeof(struct spi_master));
286 if (!spi_sub_ptr->spi_host)
287 return -ENOMEM;
288
289 spi_sub_ptr->parent = spi_bus;
290 spi_sub_ptr->spi_xfer_in_progress = false;
291
292 if (!iter) {
293 ret = pcim_enable_device(pdev);
294 if (ret)
295 return -ENOMEM;
296
297 ret = pci_request_regions(pdev, DRV_NAME);
298 if (ret)
299 return -ENOMEM;
300
301 spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
302 if (!spi_bus->reg_base) {
303 ret = -EINVAL;
304 goto error;
305 }
306
307 ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
308 PCI_IRQ_ALL_TYPES);
309 if (ret < 0) {
310 dev_err(&pdev->dev, "Error allocating MSI vectors\n");
311 goto error;
312 }
313
314 init_completion(&spi_sub_ptr->spi_xfer_done);
315 /* Initialize Interrupts - SPI_INT */
316 regval = readl(spi_bus->reg_base +
317 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
318 regval &= ~SPI_INTR;
319 writel(regval, spi_bus->reg_base +
320 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
321 spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
322
323 ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
324 pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
325 pci_name(pdev), spi_sub_ptr);
326 if (ret < 0) {
327 dev_err(&pdev->dev, "Unable to request irq : %d",
328 spi_sub_ptr->irq);
329 ret = -ENODEV;
330 goto error;
331 }
332
333 /* This register is only applicable for 1st instance */
334 regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
335 if (!only_sec_inst)
336 regval |= (BIT(4));
337 else
338 regval &= ~(BIT(4));
339
340 writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
341 }
342
343 spi_sub_ptr->hw_inst = start++;
344
345 if (iter == 1) {
346 init_completion(&spi_sub_ptr->spi_xfer_done);
347 /* Initialize Interrupts - SPI_INT */
348 regval = readl(spi_bus->reg_base +
349 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
350 regval &= ~SPI_INTR;
351 writel(regval, spi_bus->reg_base +
352 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
353 spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
354 ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
355 pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
356 pci_name(pdev), spi_sub_ptr);
357 if (ret < 0) {
358 dev_err(&pdev->dev, "Unable to request irq : %d",
359 spi_sub_ptr->irq);
360 ret = -ENODEV;
361 goto error;
362 }
363 }
364
365 spi_host = spi_sub_ptr->spi_host;
366 spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
367 spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
368 SPI_TX_DUAL | SPI_LOOP;
369 spi_host->transfer_one = pci1xxxx_spi_transfer_one;
370 spi_host->set_cs = pci1xxxx_spi_set_cs;
371 spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
372 spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
373 spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
374 spi_host->flags = SPI_MASTER_MUST_TX;
375 spi_master_set_devdata(spi_host, spi_sub_ptr);
376 ret = devm_spi_register_master(dev, spi_host);
377 if (ret)
378 goto error;
379 }
380 pci_set_drvdata(pdev, spi_bus);
381
382 return 0;
383
384error:
385 pci_release_regions(pdev);
386 return ret;
387}
388
389static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
390 struct pci1xxxx_spi_internal *spi_sub_ptr,
391 u8 inst, bool store)
392{
393 u32 regval;
394
395 if (store) {
396 regval = readl(spi_ptr->reg_base +
397 SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
398 regval &= SPI_MST_CTL_DEVSEL_MASK;
399 spi_sub_ptr->prev_val.dev_sel = (regval >> 25) & 7;
400 regval = readl(spi_ptr->reg_base +
401 SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
402 regval &= SPI_MSI_VECTOR_SEL_MASK;
403 spi_sub_ptr->prev_val.msi_vector_sel = (regval >> 4) & 1;
404 } else {
405 regval = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
406 regval &= ~SPI_MST_CTL_DEVSEL_MASK;
407 regval |= (spi_sub_ptr->prev_val.dev_sel << 25);
408 writel(regval,
409 spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
410 writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
411 spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
412 }
413}
414
415static int pci1xxxx_spi_resume(struct device *dev)
416{
417 struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
418 struct pci1xxxx_spi_internal *spi_sub_ptr;
419 u32 regval = SPI_RESUME_CONFIG;
420 u8 iter;
421
422 for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
423 spi_sub_ptr = spi_ptr->spi_int[iter];
424 spi_master_resume(spi_sub_ptr->spi_host);
425 writel(regval, spi_ptr->reg_base +
426 SPI_MST_EVENT_MASK_REG_OFFSET(iter));
427
428 /* Restore config at resume */
429 store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
430 }
431
432 return 0;
433}
434
435static int pci1xxxx_spi_suspend(struct device *dev)
436{
437 struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
438 struct pci1xxxx_spi_internal *spi_sub_ptr;
439 u32 reg1 = SPI_SUSPEND_CONFIG;
440 u8 iter;
441
442 for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
443 spi_sub_ptr = spi_ptr->spi_int[iter];
444
445 while (spi_sub_ptr->spi_xfer_in_progress)
446 msleep(20);
447
448 /* Store existing config before suspend */
449 store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
450 spi_master_suspend(spi_sub_ptr->spi_host);
451 writel(reg1, spi_ptr->reg_base +
452 SPI_MST_EVENT_MASK_REG_OFFSET(iter));
453 }
454
455 return 0;
456}
457
/* Sleep-only PM ops; compiled out when CONFIG_PM_SLEEP is disabled */
static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
				pci1xxxx_spi_resume);

static struct pci_driver pci1xxxx_spi_driver = {
	.name = DRV_NAME,
	.id_table = pci1xxxx_spi_pci_id_table,
	.probe = pci1xxxx_spi_probe,
	.driver = {
		.pm = pm_sleep_ptr(&spi_pm_ops),
	},
};

module_pci_driver(pci1xxxx_spi_driver);

MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
MODULE_AUTHOR("Tharun Kumar P<tharunkumar.pasumarthi@microchip.com>");
MODULE_AUTHOR("Kumaravel Thiagarajan<kumaravel.thiagarajan@microchip.com>");
MODULE_LICENSE("GPL v2");