1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for the Intel SCU IPC mechanism
4 *
5 * (C) Copyright 2008-2010,2015 Intel Corporation
6 * Author: Sreedhara DS (sreedhara.ds@intel.com)
7 *
8 * The SCU, running on an ARC processor, communicates with entities on the
9 * IA core through an IPC mechanism that carries messages between the IA
10 * core and the SCU. The SCU has two IPC mechanisms, IPC-1 and IPC-2. IPC-1
11 * is used between the IA32 core and the SCU, whereas IPC-2 is used between
12 * the P-Unit and the SCU. This driver deals with IPC-1 and provides an API
13 * for power control unit registers (e.g. MSIC) along with other APIs.
14 */
15
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24
25#include <asm/intel_scu_ipc.h>
26
27/* IPC defines the following message types */
28#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
29
30/* Command id associated with message IPCMSG_PCNTRL */
31#define IPC_CMD_PCNTRL_W 0 /* Register write */
32#define IPC_CMD_PCNTRL_R 1 /* Register read */
33#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
34
35/*
36 * IPC register summary
37 *
38 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
39 * To read or write information to the SCU, driver writes to IPC-1 memory
40 * mapped registers. The following is the IPC mechanism
41 *
42 * 1. IA core cDMI interface claims this transaction and converts it to a
43 * Transaction Layer Packet (TLP) message which is sent across the cDMI.
44 *
45 * 2. South Complex cDMI block receives this message and writes it to
46 * the IPC-1 register block, causing an interrupt to the SCU
47 *
48 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
49 * message handler is called within firmware.
50 */
51
52#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
53#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
54#define IPC_IOC 0x100 /* IPC command register IOC bit */
55
56struct intel_scu_ipc_dev {
57 struct device dev;
58 struct resource mem;
59 struct module *owner;
60 int irq;
61 void __iomem *ipc_base;
62 struct completion cmd_complete;
63};
64
65#define IPC_STATUS 0x04
66#define IPC_STATUS_IRQ BIT(2)
67#define IPC_STATUS_ERR BIT(1)
68#define IPC_STATUS_BUSY BIT(0)
69
70/*
71 * IPC Write/Read Buffers:
72 * 16 byte buffer for sending and receiving data to and from SCU.
73 */
74#define IPC_WRITE_BUFFER 0x80
75#define IPC_READ_BUFFER 0x90
76
77/* Timeout in jiffies */
78#define IPC_TIMEOUT (10 * HZ)
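
/*
 * Illustrative sketch (not part of the driver): one polled IPC transaction,
 * matching the register summary above. The payload goes into the write
 * buffer, a write to the command register (offset 0) kicks the SCU, and the
 * status register is polled until the BUSY bit clears.
 */
static int __maybe_unused example_polled_transaction(struct intel_scu_ipc_dev *scu,
						      u32 cmd, u32 payload)
{
	unsigned long end = jiffies + IPC_TIMEOUT;

	writel(payload, scu->ipc_base + IPC_WRITE_BUFFER);
	writel(cmd, scu->ipc_base);

	do {
		u32 status = readl(scu->ipc_base + IPC_STATUS);

		if (!(status & IPC_STATUS_BUSY))
			return (status & IPC_STATUS_ERR) ? -EIO : 0;
		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}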
79
80static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
81static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to the SCU */
82
83static struct class intel_scu_ipc_class = {
84 .name = "intel_scu_ipc",
85 .owner = THIS_MODULE,
86};
87
88/**
89 * intel_scu_ipc_dev_get() - Get SCU IPC instance
90 *
91 * The recommended new API takes the SCU IPC instance as a parameter, and
92 * a driver can call this function to obtain that instance. Holding the
93 * instance also ensures that the driver providing the IPC functionality
94 * cannot be unloaded while the caller is using it.
95 *
96 * Call intel_scu_ipc_dev_put() to release the instance.
97 *
98 * Returns %NULL if SCU IPC is not currently available.
99 */
100struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
101{
102 struct intel_scu_ipc_dev *scu = NULL;
103
104 mutex_lock(&ipclock);
105 if (ipcdev) {
106 get_device(&ipcdev->dev);
107 /*
108 * Prevent the IPC provider from being unloaded while it
109 * is being used.
110 */
111 if (!try_module_get(ipcdev->owner))
112 put_device(&ipcdev->dev);
113 else
114 scu = ipcdev;
115 }
116
117 mutex_unlock(&ipclock);
118 return scu;
119}
120EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
121
122/**
123 * intel_scu_ipc_dev_put() - Put SCU IPC instance
124 * @scu: SCU IPC instance
125 *
126 * This function releases the SCU IPC instance retrieved from
127 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
128 * unloaded.
129 */
130void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
131{
132 if (scu) {
133 module_put(scu->owner);
134 put_device(&scu->dev);
135 }
136}
137EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
138
139struct intel_scu_ipc_devres {
140 struct intel_scu_ipc_dev *scu;
141};
142
143static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
144{
145 struct intel_scu_ipc_devres *dr = res;
146 struct intel_scu_ipc_dev *scu = dr->scu;
147
148 intel_scu_ipc_dev_put(scu);
149}
150
151/**
152 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
153 * @dev: Device requesting the SCU IPC device
154 *
155 * This is the device-managed variant of intel_scu_ipc_dev_get(): a driver
156 * can call it to obtain the instance, the reference is dropped
157 * automatically when @dev is unbound, and the driver providing the IPC
158 * functionality cannot be unloaded while the instance is held.
159 *
160 * Returns %NULL if SCU IPC is not currently available.
161 */
162struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
163{
164 struct intel_scu_ipc_devres *dr;
165 struct intel_scu_ipc_dev *scu;
166
167 dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
168 if (!dr)
169 return NULL;
170
171 scu = intel_scu_ipc_dev_get();
172 if (!scu) {
173 devres_free(dr);
174 return NULL;
175 }
176
177 dr->scu = scu;
178 devres_add(dev, dr);
179
180 return scu;
181}
182EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
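
/*
 * Illustrative sketch of a consumer's probe path using the managed getter,
 * so the reference is dropped automatically when the consumer is unbound.
 * The "example_consumer_probe" name is an assumption for the sketch only.
 */
static int __maybe_unused example_consumer_probe(struct device *dev)
{
	struct intel_scu_ipc_dev *scu;

	scu = devm_intel_scu_ipc_dev_get(dev);
	if (!scu)
		return -EPROBE_DEFER; /* SCU IPC provider not registered yet */

	dev_set_drvdata(dev, scu);
	return 0;
}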
183
184/*
185 * Send ipc command
186 * Command Register (Write Only):
187 * A write to this register results in an interrupt to the SCU core processor
188 * Format:
189 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
190 */
191static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
192{
193 reinit_completion(&scu->cmd_complete);
194 writel(cmd | IPC_IOC, scu->ipc_base);
195}
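
/*
 * Illustrative sketch (not used by the driver): how a command word maps onto
 * the layout documented above for an IPCMSG_PCNTRL register read of @count
 * registers. The size field carries the payload length in bytes (two address
 * bytes per register); ipc_command() ORs in IPC_IOC itself.
 */
static u32 __maybe_unused example_pcntrl_read_cmd(u32 count)
{
	return (count * 2) << 16 |	/* size(8) */
	       IPC_CMD_PCNTRL_R << 12 |	/* command id(4) */
	       IPCMSG_PCNTRL;		/* command(8) */
}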
196
197/*
198 * Write ipc data
199 * IPC Write Buffer (Write Only):
200 * 16-byte buffer for sending data associated with IPC command to
201 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
202 */
203static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
204{
205 writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
206}
207
208/*
209 * Status Register (Read Only):
210 * Driver will read this register to get the ready/busy status of the IPC
211 * block and error status of the IPC command that was just processed by SCU
212 * Format:
213 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
214 */
215static inline u32 ipc_read_status(struct intel_scu_ipc_dev *scu)
216{
217 return __raw_readl(scu->ipc_base + IPC_STATUS);
218}
219
220/* Read ipc byte data */
221static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
222{
223 return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
224}
225
226/* Read ipc u32 data */
227static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
228{
229 return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
230}
231
232/* Wait until the SCU stops being busy or the timeout expires */
233static inline int busy_loop(struct intel_scu_ipc_dev *scu)
234{
235 unsigned long end = jiffies + IPC_TIMEOUT;
236
237 do {
238 u32 status;
239
240 status = ipc_read_status(scu);
241 if (!(status & IPC_STATUS_BUSY))
242 return (status & IPC_STATUS_ERR) ? -EIO : 0;
243
244 usleep_range(50, 100);
245 } while (time_before(jiffies, end));
246
247 return -ETIMEDOUT;
248}
249
250/* Wait until the IPC IOC interrupt is received or the 10 second timeout expires */
251static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
252{
253 int status;
254
255 if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
256 return -ETIMEDOUT;
257
258 status = ipc_read_status(scu);
259 if (status & IPC_STATUS_ERR)
260 return -EIO;
261
262 return 0;
263}
264
265static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
266{
267 return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
268}
269
270/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
271static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
272 u32 count, u32 op, u32 id)
273{
274 int nc;
275 u32 offset = 0;
276 int err;
277 u8 cbuf[IPC_WWBUF_SIZE];
278 u32 *wbuf = (u32 *)&cbuf;
279
280 memset(cbuf, 0, sizeof(cbuf));
281
282 mutex_lock(&ipclock);
283 if (!scu)
284 scu = ipcdev;
285 if (!scu) {
286 mutex_unlock(&ipclock);
287 return -ENODEV;
288 }
289
290 for (nc = 0; nc < count; nc++, offset += 2) {
291 cbuf[offset] = addr[nc];
292 cbuf[offset + 1] = addr[nc] >> 8;
293 }
294
295 if (id == IPC_CMD_PCNTRL_R) {
296 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
297 ipc_data_writel(scu, wbuf[nc], offset);
298 ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
299 } else if (id == IPC_CMD_PCNTRL_W) {
300 for (nc = 0; nc < count; nc++, offset += 1)
301 cbuf[offset] = data[nc];
302 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
303 ipc_data_writel(scu, wbuf[nc], offset);
304 ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
305 } else if (id == IPC_CMD_PCNTRL_M) {
306 cbuf[offset] = data[0];
307 cbuf[offset + 1] = data[1];
308 ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
309 ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
310 }
311
312 err = intel_scu_ipc_check_status(scu);
313 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
314 /* Workaround: values are read as 0 without memcpy_fromio */
315 memcpy_fromio(cbuf, scu->ipc_base + IPC_READ_BUFFER, 16);
316 for (nc = 0; nc < count; nc++)
317 data[nc] = ipc_data_readb(scu, nc);
318 }
319 mutex_unlock(&ipclock);
320 return err;
321}
322
323/**
324 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
325 * @scu: Optional SCU IPC instance
326 * @addr: Register on SCU
327 * @data: Return pointer for read byte
328 *
329 * Read a single register. Returns %0 on success or an error code. All
330 * locking between SCU accesses is handled for the caller.
331 *
332 * This function may sleep.
333 */
334int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
335{
336 return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
337}
338EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
339
340/**
341 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
342 * @scu: Optional SCU IPC instance
343 * @addr: Register on SCU
344 * @data: Byte to write
345 *
346 * Write a single register. Returns %0 on success or an error code. All
347 * locking between SCU accesses is handled for the caller.
348 *
349 * This function may sleep.
350 */
351int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
352{
353 return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
354}
355EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
356
357/**
358 * intel_scu_ipc_dev_readv() - Read a set of registers
359 * @scu: Optional SCU IPC instance
360 * @addr: Register list
361 * @data: Bytes to return
362 * @len: Length of array
363 *
364 * Read registers. Returns %0 on success or an error code. All locking
365 * between SCU accesses is handled for the caller.
366 *
367 * The largest array length permitted by the hardware is 5 items.
368 *
369 * This function may sleep.
370 */
371int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
372 size_t len)
373{
374 return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
375}
376EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
377
378/**
379 * intel_scu_ipc_dev_writev() - Write a set of registers
380 * @scu: Optional SCU IPC instance
381 * @addr: Register list
382 * @data: Bytes to write
383 * @len: Length of array
384 *
385 * Write registers. Returns %0 on success or an error code. All locking
386 * between SCU accesses is handled for the caller.
387 *
388 * The largest array length permitted by the hardware is 5 items.
389 *
390 * This function may sleep.
391 */
392int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
393 size_t len)
394{
395 return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
396}
397EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
398
399/**
400 * intel_scu_ipc_dev_update() - Update a register
401 * @scu: Optional SCU IPC instance
402 * @addr: Register address
403 * @data: Bits to update
404 * @mask: Mask of bits to update
405 *
406 * Read-modify-write a power control unit register. @data holds the new
407 * bit values and @mask is a bitmap selecting which bits to update:
408 * %0 = leave the bit unchanged, %1 = replace the bit with the
409 * corresponding bit of @data. Returns %0 on success or an error code.
410 *
411 * This function may sleep. Locking between SCU accesses is handled
412 * for the caller.
413 */
414int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
415 u8 mask)
416{
417 u8 tmp[2] = { data, mask };
418 return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
419}
420EXPORT_SYMBOL(intel_scu_ipc_dev_update);
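
/*
 * Minimal usage sketch (illustrative only): a consumer takes a reference to
 * the SCU IPC instance, sets one bit in a PMIC register with the
 * read-modify-write helper and drops the reference. The register address
 * 0x1f and the bit are placeholders, not real PMIC definitions.
 */
static int __maybe_unused example_set_pmic_bit(void)
{
	struct intel_scu_ipc_dev *scu;
	int ret;

	scu = intel_scu_ipc_dev_get();
	if (!scu)
		return -ENODEV; /* SCU IPC not available */

	/* Update bit 0 only; all other bits are masked out */
	ret = intel_scu_ipc_dev_update(scu, 0x1f, 0x01, 0x01);

	intel_scu_ipc_dev_put(scu);
	return ret;
}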
421
422/**
423 * intel_scu_ipc_dev_simple_command() - Send a simple command
424 * @scu: Optional SCU IPC instance
425 * @cmd: Command
426 * @sub: Sub type
427 *
428 * Issue a simple command to the SCU. Do not use this interface if you must
429 * then access data as any data values may be overwritten by another SCU
430 * access by the time this function returns.
431 *
432 * This function may sleep. Locking for SCU accesses is handled for the
433 * caller.
434 */
435int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
436 int sub)
437{
438 u32 cmdval;
439 int err;
440
441 mutex_lock(&ipclock);
442 if (!scu)
443 scu = ipcdev;
444 if (!scu) {
445 mutex_unlock(&ipclock);
446 return -ENODEV;
447 }
449 cmdval = sub << 12 | cmd;
450 ipc_command(scu, cmdval);
451 err = intel_scu_ipc_check_status(scu);
452 mutex_unlock(&ipclock);
453 if (err)
454 dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
455 return err;
456}
457EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
458
459/**
460 * intel_scu_ipc_dev_command_with_size() - Command with data
461 * @scu: Optional SCU IPC instance
462 * @cmd: Command
463 * @sub: Sub type
464 * @in: Input data
465 * @inlen: Input length in bytes
466 * @size: Input size written to the IPC command register in whatever
467 * units (dword, byte) the particular firmware requires. Normally
468 * should be the same as @inlen.
469 * @out: Output data
470 * @outlen: Output length in bytes
471 *
472 * Issue a command to the SCU which involves data transfers. Do the
473 * data copies under the lock but leave it for the caller to interpret.
474 */
475int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
476 int sub, const void *in, size_t inlen,
477 size_t size, void *out, size_t outlen)
478{
479 size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
480 size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
481 u32 cmdval, inbuf[4] = {};
482 int i, err;
483
484 if (inbuflen > 4 || outbuflen > 4)
485 return -EINVAL;
486
487 mutex_lock(&ipclock);
488 if (!scu)
489 scu = ipcdev;
490 if (!scu) {
491 mutex_unlock(&ipclock);
492 return -ENODEV;
493 }
494
495 memcpy(inbuf, in, inlen);
496 for (i = 0; i < inbuflen; i++)
497 ipc_data_writel(scu, inbuf[i], 4 * i);
498
499 cmdval = (size << 16) | (sub << 12) | cmd;
500 ipc_command(scu, cmdval);
501 err = intel_scu_ipc_check_status(scu);
502
503 if (!err) {
504 u32 outbuf[4] = {};
505
506 for (i = 0; i < outbuflen; i++)
507 outbuf[i] = ipc_data_readl(scu, 4 * i);
508
509 memcpy(out, outbuf, outlen);
510 }
511
512 mutex_unlock(&ipclock);
513 if (err)
514 dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
515 return err;
516}
517EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
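
/*
 * Illustrative sketch: issue a command that takes no input payload and
 * returns one dword of data. The command number 0xf4 and sub type 0 are
 * placeholders; real values are firmware specific.
 */
static int __maybe_unused example_query_dword(struct intel_scu_ipc_dev *scu, u32 *val)
{
	return intel_scu_ipc_dev_command_with_size(scu, 0xf4, 0, NULL, 0, 0,
						   val, sizeof(*val));
}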
518
519/*
520 * The interrupt handler is called when the IOC bit of IPC_COMMAND_REG is
521 * set. The caller then waits on cmd_complete, which the handler completes
522 * to unblock the waiting command (used when the SCU IPC has an IRQ).
523 *
524 * The interrupt is edge triggered, so no extra action is needed to clear it
525 */
526static irqreturn_t ioc(int irq, void *dev_id)
527{
528 struct intel_scu_ipc_dev *scu = dev_id;
529 int status = ipc_read_status(scu);
530
531 writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
532 complete(&scu->cmd_complete);
533
534 return IRQ_HANDLED;
535}
536
537static void intel_scu_ipc_release(struct device *dev)
538{
539 struct intel_scu_ipc_dev *scu;
540
541 scu = container_of(dev, struct intel_scu_ipc_dev, dev);
542 if (scu->irq > 0)
543 free_irq(scu->irq, scu);
544 iounmap(scu->ipc_base);
545 release_mem_region(scu->mem.start, resource_size(&scu->mem));
546 kfree(scu);
547}
548
549/**
550 * __intel_scu_ipc_register() - Register SCU IPC device
551 * @parent: Parent device
552 * @scu_data: Data used to configure SCU IPC
553 * @owner: Module registering the SCU IPC device
554 *
555 * Call this function to register SCU IPC mechanism under @parent.
556 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
557 * failure. The caller may use the returned instance if it needs to do
558 * SCU IPC calls itself.
559 */
560struct intel_scu_ipc_dev *
561__intel_scu_ipc_register(struct device *parent,
562 const struct intel_scu_ipc_data *scu_data,
563 struct module *owner)
564{
565 int err;
566 struct intel_scu_ipc_dev *scu;
567 void __iomem *ipc_base;
568
569 mutex_lock(&ipclock);
570 /* We support only one IPC */
571 if (ipcdev) {
572 err = -EBUSY;
573 goto err_unlock;
574 }
575
576 scu = kzalloc(sizeof(*scu), GFP_KERNEL);
577 if (!scu) {
578 err = -ENOMEM;
579 goto err_unlock;
580 }
581
582 scu->owner = owner;
583 scu->dev.parent = parent;
584 scu->dev.class = &intel_scu_ipc_class;
585 scu->dev.release = intel_scu_ipc_release;
586
587 if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
588 "intel_scu_ipc")) {
589 err = -EBUSY;
590 goto err_free;
591 }
592
593 ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
594 if (!ipc_base) {
595 err = -ENOMEM;
596 goto err_release;
597 }
598
599 scu->ipc_base = ipc_base;
600 scu->mem = scu_data->mem;
601 scu->irq = scu_data->irq;
602 init_completion(&scu->cmd_complete);
603
604 if (scu->irq > 0) {
605 err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
606 if (err)
607 goto err_unmap;
608 }
609
610 /*
611 * After this point intel_scu_ipc_release() takes care of
612 * releasing the SCU IPC resources once refcount drops to zero.
613 */
614 dev_set_name(&scu->dev, "intel_scu_ipc");
615 err = device_register(&scu->dev);
616 if (err) {
617 put_device(&scu->dev);
618 goto err_unlock;
619 }
620
621 /* Assign device at last */
622 ipcdev = scu;
623 mutex_unlock(&ipclock);
624
625 return scu;
626
627err_unmap:
628 iounmap(ipc_base);
629err_release:
630 release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
631err_free:
632 kfree(scu);
633err_unlock:
634 mutex_unlock(&ipclock);
635
636 return ERR_PTR(err);
637}
638EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
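
/*
 * Illustrative provider-side sketch: a bus driver fills struct
 * intel_scu_ipc_data with its MMIO resource and IRQ and registers the IPC
 * instance. The helper name and parameters are assumptions for the example.
 */
static struct intel_scu_ipc_dev * __maybe_unused
example_register_provider(struct device *parent, struct resource *mem, int irq)
{
	struct intel_scu_ipc_data scu_data = {};

	scu_data.mem = *mem;
	scu_data.irq = irq; /* irq <= 0 selects polled mode */

	return __intel_scu_ipc_register(parent, &scu_data, THIS_MODULE);
}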
639
640/**
641 * intel_scu_ipc_unregister() - Unregister SCU IPC
642 * @scu: SCU IPC handle
643 *
644 * This unregisters the SCU IPC device and releases the acquired
645 * resources once the refcount goes to zero.
646 */
647void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
648{
649 mutex_lock(&ipclock);
650 if (!WARN_ON(!ipcdev)) {
651 ipcdev = NULL;
652 device_unregister(&scu->dev);
653 }
654 mutex_unlock(&ipclock);
655}
656EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
657
658static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
659{
660 struct intel_scu_ipc_devres *dr = res;
661 struct intel_scu_ipc_dev *scu = dr->scu;
662
663 intel_scu_ipc_unregister(scu);
664}
665
666/**
667 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
668 * @parent: Parent device
669 * @scu_data: Data used to configure SCU IPC
670 * @owner: Module registering the SCU IPC device
671 *
672 * Call this function to register managed SCU IPC mechanism under
673 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
674 * case of failure. The caller may use the returned instance if it needs
675 * to do SCU IPC calls itself.
676 */
677struct intel_scu_ipc_dev *
678__devm_intel_scu_ipc_register(struct device *parent,
679 const struct intel_scu_ipc_data *scu_data,
680 struct module *owner)
681{
682 struct intel_scu_ipc_devres *dr;
683 struct intel_scu_ipc_dev *scu;
684
685 dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
686 if (!dr)
687 return NULL;
688
689 scu = __intel_scu_ipc_register(parent, scu_data, owner);
690 if (IS_ERR(scu)) {
691 devres_free(dr);
692 return scu;
693 }
694
695 dr->scu = scu;
696 devres_add(parent, dr);
697
698 return scu;
699}
700EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
701
702static int __init intel_scu_ipc_init(void)
703{
704 return class_register(&intel_scu_ipc_class);
705}
706subsys_initcall(intel_scu_ipc_init);
707
708static void __exit intel_scu_ipc_exit(void)
709{
710 class_unregister(&intel_scu_ipc_class);
711}
712module_exit(intel_scu_ipc_exit);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for the Intel SCU IPC mechanism
4 *
5 * (C) Copyright 2008-2010,2015 Intel Corporation
6 * Author: Sreedhara DS (sreedhara.ds@intel.com)
7 *
8 * The SCU, running on an ARC processor, communicates with entities on the
9 * IA core through an IPC mechanism that carries messages between the IA
10 * core and the SCU. The SCU has two IPC mechanisms, IPC-1 and IPC-2. IPC-1
11 * is used between the IA32 core and the SCU, whereas IPC-2 is used between
12 * the P-Unit and the SCU. This driver deals with IPC-1 and provides an API
13 * for power control unit registers (e.g. MSIC) along with other APIs.
14 */
15
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/pm.h>
23#include <linux/sfi.h>
24
25#include <asm/intel-mid.h>
26#include <asm/intel_scu_ipc.h>
27
28/* IPC defines the following message types */
29#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
30#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
31#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
32#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
33#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
34
35/* Command id associated with message IPCMSG_PCNTRL */
36#define IPC_CMD_PCNTRL_W 0 /* Register write */
37#define IPC_CMD_PCNTRL_R 1 /* Register read */
38#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
39
40/*
41 * IPC register summary
42 *
43 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
44 * To read or write information to the SCU, driver writes to IPC-1 memory
45 * mapped registers. The following is the IPC mechanism
46 *
47 * 1. IA core cDMI interface claims this transaction and converts it to a
48 * Transaction Layer Packet (TLP) message which is sent across the cDMI.
49 *
50 * 2. South Complex cDMI block receives this message and writes it to
51 * the IPC-1 register block, causing an interrupt to the SCU
52 *
53 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
54 * message handler is called within firmware.
55 */
56
57#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
58#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
59#define IPC_IOC 0x100 /* IPC command register IOC bit */
60
61#define PCI_DEVICE_ID_LINCROFT 0x082a
62#define PCI_DEVICE_ID_PENWELL 0x080e
63#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
64#define PCI_DEVICE_ID_TANGIER 0x11a0
65
66/* intel scu ipc driver data */
67struct intel_scu_ipc_pdata_t {
68 u32 i2c_base;
69 u32 i2c_len;
70 u8 irq_mode;
71};
72
73static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
74 .i2c_base = 0xff12b000,
75 .i2c_len = 0x10,
76 .irq_mode = 0,
77};
78
79/* Penwell and Cloverview */
80static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
81 .i2c_base = 0xff12b000,
82 .i2c_len = 0x10,
83 .irq_mode = 1,
84};
85
86static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
87 .i2c_base = 0xff00d000,
88 .i2c_len = 0x10,
89 .irq_mode = 0,
90};
91
92struct intel_scu_ipc_dev {
93 struct device *dev;
94 void __iomem *ipc_base;
95 void __iomem *i2c_base;
96 struct completion cmd_complete;
97 u8 irq_mode;
98};
99
100static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
101
102/*
103 * IPC Read Buffer (Read Only):
104 * 16 byte buffer for receiving data from SCU, if IPC command
105 * processing results in response data
106 */
107#define IPC_READ_BUFFER 0x90
108
109#define IPC_I2C_CNTRL_ADDR 0
110#define I2C_DATA_ADDR 0x04
111
112static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to the SCU */
113
114/*
115 * Send ipc command
116 * Command Register (Write Only):
117 * A write to this register results in an interrupt to the SCU core processor
118 * Format:
119 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
120 */
121static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
122{
123 if (scu->irq_mode) {
124 reinit_completion(&scu->cmd_complete);
125 writel(cmd | IPC_IOC, scu->ipc_base);
126 }
127 writel(cmd, scu->ipc_base);
128}
129
130/*
131 * Write ipc data
132 * IPC Write Buffer (Write Only):
133 * 16-byte buffer for sending data associated with IPC command to
134 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
135 */
136static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
137{
138 writel(data, scu->ipc_base + 0x80 + offset);
139}
140
141/*
142 * Status Register (Read Only):
143 * Driver will read this register to get the ready/busy status of the IPC
144 * block and error status of the IPC command that was just processed by SCU
145 * Format:
146 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
147 */
148static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
149{
150 return __raw_readl(scu->ipc_base + 0x04);
151}
152
153/* Read ipc byte data */
154static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
155{
156 return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
157}
158
159/* Read ipc u32 data */
160static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
161{
162 return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
163}
164
165/* Wait until the SCU stops being busy or we give up after many retries */
166static inline int busy_loop(struct intel_scu_ipc_dev *scu)
167{
168 u32 status = ipc_read_status(scu);
169 u32 loop_count = 100000;
170
171 /* Break out if the SCU doesn't clear the busy bit after many retries */
172 while ((status & BIT(0)) && --loop_count) {
173 udelay(1); /* SCU processing time is a few microseconds */
174 status = ipc_read_status(scu);
175 }
176
177 if (status & BIT(0)) {
178 dev_err(scu->dev, "IPC timed out\n");
179 return -ETIMEDOUT;
180 }
181
182 if (status & BIT(1))
183 return -EIO;
184
185 return 0;
186}
187
188/* Wait until the IPC IOC interrupt is received or the 3 second timeout expires */
189static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
190{
191 int status;
192
193 if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
194 dev_err(scu->dev, "IPC timed out\n");
195 return -ETIMEDOUT;
196 }
197
198 status = ipc_read_status(scu);
199 if (status & BIT(1))
200 return -EIO;
201
202 return 0;
203}
204
205static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
206{
207 return scu->irq_mode ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
208}
209
210/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
211static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
212{
213 struct intel_scu_ipc_dev *scu = &ipcdev;
214 int nc;
215 u32 offset = 0;
216 int err;
217 u8 cbuf[IPC_WWBUF_SIZE];
218 u32 *wbuf = (u32 *)&cbuf;
219
220 memset(cbuf, 0, sizeof(cbuf));
221
222 mutex_lock(&ipclock);
223
224 if (scu->dev == NULL) {
225 mutex_unlock(&ipclock);
226 return -ENODEV;
227 }
228
229 for (nc = 0; nc < count; nc++, offset += 2) {
230 cbuf[offset] = addr[nc];
231 cbuf[offset + 1] = addr[nc] >> 8;
232 }
233
234 if (id == IPC_CMD_PCNTRL_R) {
235 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
236 ipc_data_writel(scu, wbuf[nc], offset);
237 ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
238 } else if (id == IPC_CMD_PCNTRL_W) {
239 for (nc = 0; nc < count; nc++, offset += 1)
240 cbuf[offset] = data[nc];
241 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
242 ipc_data_writel(scu, wbuf[nc], offset);
243 ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
244 } else if (id == IPC_CMD_PCNTRL_M) {
245 cbuf[offset] = data[0];
246 cbuf[offset + 1] = data[1];
247 ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
248 ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
249 }
250
251 err = intel_scu_ipc_check_status(scu);
252 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
253 /* Workaround: values are read as 0 without memcpy_fromio */
254 memcpy_fromio(cbuf, scu->ipc_base + IPC_READ_BUFFER, 16);
255 for (nc = 0; nc < count; nc++)
256 data[nc] = ipc_data_readb(scu, nc);
257 }
258 mutex_unlock(&ipclock);
259 return err;
260}
261
262/**
263 * intel_scu_ipc_ioread8 - read a byte via the SCU
264 * @addr: register on SCU
265 * @data: return pointer for read byte
266 *
267 * Read a single register. Returns 0 on success or an error code. All
268 * locking between SCU accesses is handled for the caller.
269 *
270 * This function may sleep.
271 */
272int intel_scu_ipc_ioread8(u16 addr, u8 *data)
273{
274 return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
275}
276EXPORT_SYMBOL(intel_scu_ipc_ioread8);
277
278/**
279 * intel_scu_ipc_ioread16 - read a word via the SCU
280 * @addr: register on SCU
281 * @data: return pointer for read word
282 *
283 * Read a register pair. Returns 0 on success or an error code. All
284 * locking between SCU accesses is handled for the caller.
285 *
286 * This function may sleep.
287 */
288int intel_scu_ipc_ioread16(u16 addr, u16 *data)
289{
290 u16 x[2] = {addr, addr + 1};
291 return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
292}
293EXPORT_SYMBOL(intel_scu_ipc_ioread16);
294
295/**
296 * intel_scu_ipc_ioread32 - read a dword via the SCU
297 * @addr: register on SCU
298 * @data: return pointer for read dword
299 *
300 * Read four registers. Returns 0 on success or an error code. All
301 * locking between SCU accesses is handled for the caller.
302 *
303 * This function may sleep.
304 */
305int intel_scu_ipc_ioread32(u16 addr, u32 *data)
306{
307 u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
308 return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
309}
310EXPORT_SYMBOL(intel_scu_ipc_ioread32);
311
312/**
313 * intel_scu_ipc_iowrite8 - write a byte via the SCU
314 * @addr: register on SCU
315 * @data: byte to write
316 *
317 * Write a single register. Returns 0 on success or an error code. All
318 * locking between SCU accesses is handled for the caller.
319 *
320 * This function may sleep.
321 */
322int intel_scu_ipc_iowrite8(u16 addr, u8 data)
323{
324 return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
325}
326EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
327
328/**
329 * intel_scu_ipc_iowrite16 - write a word via the SCU
330 * @addr: register on SCU
331 * @data: word to write
332 *
333 * Write two registers. Returns 0 on success or an error code. All
334 * locking between SCU accesses is handled for the caller.
335 *
336 * This function may sleep.
337 */
338int intel_scu_ipc_iowrite16(u16 addr, u16 data)
339{
340 u16 x[2] = {addr, addr + 1};
341 return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
342}
343EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
344
345/**
346 * intel_scu_ipc_iowrite32 - write a dword via the SCU
347 * @addr: register on SCU
348 * @data: dword to write
349 *
350 * Write four registers. Returns 0 on success or an error code. All
351 * locking between SCU accesses is handled for the caller.
352 *
353 * This function may sleep.
354 */
355int intel_scu_ipc_iowrite32(u16 addr, u32 data)
356{
357 u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
358 return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
359}
360EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
361
362/**
363 * intel_scu_ipc_readv - read a set of registers
364 * @addr: register list
365 * @data: bytes to return
366 * @len: length of array
367 *
368 * Read registers. Returns 0 on success or an error code. All
369 * locking between SCU accesses is handled for the caller.
370 *
371 * The largest array length permitted by the hardware is 5 items.
372 *
373 * This function may sleep.
374 */
375int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
376{
377 return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
378}
379EXPORT_SYMBOL(intel_scu_ipc_readv);
380
381/**
382 * intel_scu_ipc_writev - write a set of registers
383 * @addr: register list
384 * @data: bytes to write
385 * @len: length of array
386 *
387 * Write registers. Returns 0 on success or an error code. All
388 * locking between SCU accesses is handled for the caller.
389 *
390 * The largest array length permitted by the hardware is 5 items.
391 *
392 * This function may sleep.
393 *
394 */
395int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
396{
397 return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
398}
399EXPORT_SYMBOL(intel_scu_ipc_writev);
400
401/**
402 * intel_scu_ipc_update_register - r/m/w a register
403 * @addr: register address
404 * @bits: bits to update
405 * @mask: mask of bits to update
406 *
407 * Read-modify-write a power control unit register. @bits holds the new
408 * bit values and @mask is a bitmap selecting which bits to update:
409 * 0 = leave the bit unchanged,
410 * 1 = replace the bit with the corresponding bit of @bits.
411 * Returns 0 on success or an error code.
412 *
413 * This function may sleep. Locking between SCU accesses is handled
414 * for the caller.
415 */
416int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
417{
418 u8 data[2] = { bits, mask };
419 return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
420}
421EXPORT_SYMBOL(intel_scu_ipc_update_register);
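
/*
 * Illustrative sketch of the legacy, instance-less API: read two scattered
 * registers in one IPC transaction, then set a single bit in the first one.
 * The register addresses are placeholders.
 */
static int __maybe_unused example_legacy_access(void)
{
	u16 regs[2] = { 0x1f, 0x2b };
	u8 vals[2];
	int ret;

	ret = intel_scu_ipc_readv(regs, vals, 2);
	if (ret)
		return ret;

	/* Set bit 3 of the first register, leaving the other bits alone */
	return intel_scu_ipc_update_register(regs[0], BIT(3), BIT(3));
}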
422
423/**
424 * intel_scu_ipc_simple_command - send a simple command
425 * @cmd: command
426 * @sub: sub type
427 *
428 * Issue a simple command to the SCU. Do not use this interface if
429 * you must then access data as any data values may be overwritten
430 * by another SCU access by the time this function returns.
431 *
432 * This function may sleep. Locking for SCU accesses is handled for
433 * the caller.
434 */
435int intel_scu_ipc_simple_command(int cmd, int sub)
436{
437 struct intel_scu_ipc_dev *scu = &ipcdev;
438 int err;
439
440 mutex_lock(&ipclock);
441 if (scu->dev == NULL) {
442 mutex_unlock(&ipclock);
443 return -ENODEV;
444 }
445 ipc_command(scu, sub << 12 | cmd);
446 err = intel_scu_ipc_check_status(scu);
447 mutex_unlock(&ipclock);
448 return err;
449}
450EXPORT_SYMBOL(intel_scu_ipc_simple_command);
451
452/**
453 * intel_scu_ipc_command - command with data
454 * @cmd: command
455 * @sub: sub type
456 * @in: input data
457 * @inlen: input length in dwords
458 * @out: output data
459 * @outlen: output length in dwords
460 *
461 * Issue a command to the SCU which involves data transfers. Do the
462 * data copies under the lock but leave it for the caller to interpret
463 */
464int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
465 u32 *out, int outlen)
466{
467 struct intel_scu_ipc_dev *scu = &ipcdev;
468 int i, err;
469
470 mutex_lock(&ipclock);
471 if (scu->dev == NULL) {
472 mutex_unlock(&ipclock);
473 return -ENODEV;
474 }
475
476 for (i = 0; i < inlen; i++)
477 ipc_data_writel(scu, *in++, 4 * i);
478
479 ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
480 err = intel_scu_ipc_check_status(scu);
481
482 if (!err) {
483 for (i = 0; i < outlen; i++)
484 *out++ = ipc_data_readl(scu, 4 * i);
485 }
486
487 mutex_unlock(&ipclock);
488 return err;
489}
490EXPORT_SYMBOL(intel_scu_ipc_command);
491
492#define IPC_SPTR 0x08
493#define IPC_DPTR 0x0C
494
495/**
496 * intel_scu_ipc_raw_command() - IPC command with data and pointers
497 * @cmd: IPC command code.
498 * @sub: IPC command sub type.
499 * @in: input data of this IPC command.
500 * @inlen: input data length in dwords.
501 * @out: output data of this IPC command.
502 * @outlen: output data length in dwords.
503 * @sptr: data writing to SPTR register.
504 * @dptr: data writing to DPTR register.
505 *
506 * Send an IPC command to SCU with input/output data and source/dest pointers.
507 *
508 * Return: an IPC error code or 0 on success.
509 */
510int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
511 u32 *out, int outlen, u32 dptr, u32 sptr)
512{
513 struct intel_scu_ipc_dev *scu = &ipcdev;
514 int inbuflen = DIV_ROUND_UP(inlen, 4);
515 u32 inbuf[4];
516 int i, err;
517
518 /* Up to 16 bytes */
519 if (inbuflen > 4)
520 return -EINVAL;
521
522 mutex_lock(&ipclock);
523 if (scu->dev == NULL) {
524 mutex_unlock(&ipclock);
525 return -ENODEV;
526 }
527
528 writel(dptr, scu->ipc_base + IPC_DPTR);
529 writel(sptr, scu->ipc_base + IPC_SPTR);
530
531 /*
532 * SRAM controller doesn't support 8-bit writes, it only
533 * supports 32-bit writes, so we have to copy input data into
534 * the temporary buffer, and SCU FW will use the inlen to
535 * determine the actual input data length in the temporary
536 * buffer.
537 */
538 memcpy(inbuf, in, inlen);
539
540 for (i = 0; i < inbuflen; i++)
541 ipc_data_writel(scu, inbuf[i], 4 * i);
542
543 ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
544 err = intel_scu_ipc_check_status(scu);
545 if (!err) {
546 for (i = 0; i < outlen; i++)
547 *out++ = ipc_data_readl(scu, 4 * i);
548 }
549
550 mutex_unlock(&ipclock);
551 return err;
552}
553EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
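
/*
 * Illustrative sketch: a raw command that passes source/destination pointers
 * alongside a small inline payload and reads back one dword. The command
 * number 0xfe, the payload and the pointer values are placeholders; real
 * semantics are firmware specific.
 */
static int __maybe_unused example_raw_command(u32 dptr, u32 sptr, u32 *result)
{
	u8 payload[4] = { 0 };

	return intel_scu_ipc_raw_command(0xfe, 0, payload, sizeof(payload),
					 result, 1, dptr, sptr);
}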
554
555/* I2C commands */
556#define IPC_I2C_WRITE 1 /* I2C Write command */
557#define IPC_I2C_READ 2 /* I2C Read command */
558
559/**
560 * intel_scu_ipc_i2c_cntrl - I2C read/write operations
561 * @addr: I2C address + command bits
562 * @data: data to read/write
563 *
564 * Perform an I2C read/write operation via the SCU. All locking is
565 * handled for the caller. This function may sleep.
566 *
567 * Returns an error code or 0 on success.
568 *
569 * This has to be in the IPC driver for the locking.
570 */
571int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
572{
573 struct intel_scu_ipc_dev *scu = &ipcdev;
574 u32 cmd = 0;
575
576 mutex_lock(&ipclock);
577 if (scu->dev == NULL) {
578 mutex_unlock(&ipclock);
579 return -ENODEV;
580 }
581 cmd = (addr >> 24) & 0xFF;
582 if (cmd == IPC_I2C_READ) {
583 writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
584 /* Write not getting updated without delay */
585 usleep_range(1000, 2000);
586 *data = readl(scu->i2c_base + I2C_DATA_ADDR);
587 } else if (cmd == IPC_I2C_WRITE) {
588 writel(*data, scu->i2c_base + I2C_DATA_ADDR);
589 usleep_range(1000, 2000);
590 writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
591 } else {
592 dev_err(scu->dev,
593 "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
594
595 mutex_unlock(&ipclock);
596 return -EIO;
597 }
598 mutex_unlock(&ipclock);
599 return 0;
600}
601EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
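
/*
 * Illustrative sketch: the I2C command is carried in bits 31:24 of @addr, as
 * decoded by intel_scu_ipc_i2c_cntrl() above. The slave address value is a
 * placeholder.
 */
static int __maybe_unused example_i2c_read(u32 i2c_addr, u32 *val)
{
	return intel_scu_ipc_i2c_cntrl((IPC_I2C_READ << 24) | i2c_addr, val);
}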
602
603/*
604 * The interrupt handler is called when the IOC bit of IPC_COMMAND_REG is
605 * set. In irq_mode the caller waits on cmd_complete, which the handler
606 * completes to unblock the waiting command.
607 *
608 * This is edge triggered so we need take no action to clear anything
609 */
610static irqreturn_t ioc(int irq, void *dev_id)
611{
612 struct intel_scu_ipc_dev *scu = dev_id;
613
614 if (scu->irq_mode)
615 complete(&scu->cmd_complete);
616
617 return IRQ_HANDLED;
618}
619
620/**
621 * ipc_probe - probe an Intel SCU IPC
622 * @pdev: the PCI device matching
623 * @id: entry in the match table
624 *
625 * Enable and install an Intel SCU IPC. This appears in the PCI space
626 * but uses some hard coded addresses as well.
627 */
628static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
629{
630 int err;
631 struct intel_scu_ipc_dev *scu = &ipcdev;
632 struct intel_scu_ipc_pdata_t *pdata;
633
634 if (scu->dev) /* We support only one SCU */
635 return -EBUSY;
636
637 pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
638 if (!pdata)
639 return -ENODEV;
640
641 scu->irq_mode = pdata->irq_mode;
642
643 err = pcim_enable_device(pdev);
644 if (err)
645 return err;
646
647 err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
648 if (err)
649 return err;
650
651 init_completion(&scu->cmd_complete);
652
653 scu->ipc_base = pcim_iomap_table(pdev)[0];
654
655 scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
656 if (!scu->i2c_base)
657 return -ENOMEM;
658
659 err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
660 scu);
661 if (err)
662 return err;
663
664 /* Assign device at last */
665 scu->dev = &pdev->dev;
666
667 intel_scu_devices_create();
668
669 pci_set_drvdata(pdev, scu);
670 return 0;
671}
672
673#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
674
675static const struct pci_device_id pci_ids[] = {
676 SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata),
677 SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata),
678 SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata),
679 SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata),
680 {}
681};
682
683static struct pci_driver ipc_driver = {
684 .driver = {
685 .suppress_bind_attrs = true,
686 },
687 .name = "intel_scu_ipc",
688 .id_table = pci_ids,
689 .probe = ipc_probe,
690};
691builtin_pci_driver(ipc_driver);