Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for the Intel SCU IPC mechanism
4 *
5 * (C) Copyright 2008-2010,2015 Intel Corporation
6 * Author: Sreedhara DS (sreedhara.ds@intel.com)
7 *
8 * SCU running in ARC processor communicates with other entity running in IA
9 * core through IPC mechanism which in turn messaging between IA core ad SCU.
10 * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and
11 * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with
12 * IPC-1 Driver provides an API for power control unit registers (e.g. MSIC)
13 * along with other APIs.
14 */
15
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/iopoll.h>
23#include <linux/module.h>
24#include <linux/slab.h>
25
26#include <asm/intel_scu_ipc.h>
27
28/* IPC defines the following message types */
29#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
30
31/* Command id associated with message IPCMSG_PCNTRL */
32#define IPC_CMD_PCNTRL_W 0 /* Register write */
33#define IPC_CMD_PCNTRL_R 1 /* Register read */
34#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
35
36/*
37 * IPC register summary
38 *
39 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
40 * To read or write information to the SCU, driver writes to IPC-1 memory
41 * mapped registers. The following is the IPC mechanism
42 *
43 * 1. IA core cDMI interface claims this transaction and converts it to a
44 * Transaction Layer Packet (TLP) message which is sent across the cDMI.
45 *
46 * 2. South Complex cDMI block receives this message and writes it to
47 * the IPC-1 register block, causing an interrupt to the SCU
48 *
49 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
50 * message handler is called within firmware.
51 */
52
53#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
54#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
55#define IPC_IOC 0x100 /* IPC command register IOC bit */
56
57struct intel_scu_ipc_dev {
58 struct device dev;
59 struct resource mem;
60 struct module *owner;
61 int irq;
62 void __iomem *ipc_base;
63 struct completion cmd_complete;
64};
65
66#define IPC_STATUS 0x04
67#define IPC_STATUS_IRQ BIT(2)
68#define IPC_STATUS_ERR BIT(1)
69#define IPC_STATUS_BUSY BIT(0)
70
71/*
72 * IPC Write/Read Buffers:
73 * 16 byte buffer for sending and receiving data to and from SCU.
74 */
75#define IPC_WRITE_BUFFER 0x80
76#define IPC_READ_BUFFER 0x90
77
78/* Timeout in jiffies */
79#define IPC_TIMEOUT (10 * HZ)
80
81static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
82static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
83
84static struct class intel_scu_ipc_class = {
85 .name = "intel_scu_ipc",
86};
87
88/**
89 * intel_scu_ipc_dev_get() - Get SCU IPC instance
90 *
91 * The recommended new API takes SCU IPC instance as parameter and this
92 * function can be called by driver to get the instance. This also makes
93 * sure the driver providing the IPC functionality cannot be unloaded
94 * while the caller has the instance.
95 *
96 * Call intel_scu_ipc_dev_put() to release the instance.
97 *
98 * Returns %NULL if SCU IPC is not currently available.
99 */
100struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
101{
102 struct intel_scu_ipc_dev *scu = NULL;
103
104 mutex_lock(&ipclock);
105 if (ipcdev) {
106 get_device(&ipcdev->dev);
107 /*
108 * Prevent the IPC provider from being unloaded while it
109 * is being used.
110 */
111 if (!try_module_get(ipcdev->owner))
112 put_device(&ipcdev->dev);
113 else
114 scu = ipcdev;
115 }
116
117 mutex_unlock(&ipclock);
118 return scu;
119}
120EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
121
122/**
123 * intel_scu_ipc_dev_put() - Put SCU IPC instance
124 * @scu: SCU IPC instance
125 *
126 * This function releases the SCU IPC instance retrieved from
127 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
128 * unloaded.
129 */
130void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
131{
132 if (scu) {
133 module_put(scu->owner);
134 put_device(&scu->dev);
135 }
136}
137EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
138
139struct intel_scu_ipc_devres {
140 struct intel_scu_ipc_dev *scu;
141};
142
143static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
144{
145 struct intel_scu_ipc_devres *dr = res;
146 struct intel_scu_ipc_dev *scu = dr->scu;
147
148 intel_scu_ipc_dev_put(scu);
149}
150
151/**
152 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
153 * @dev: Device requesting the SCU IPC device
154 *
155 * The recommended new API takes SCU IPC instance as parameter and this
156 * function can be called by driver to get the instance. This also makes
157 * sure the driver providing the IPC functionality cannot be unloaded
158 * while the caller has the instance.
159 *
160 * Returns %NULL if SCU IPC is not currently available.
161 */
162struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
163{
164 struct intel_scu_ipc_devres *dr;
165 struct intel_scu_ipc_dev *scu;
166
167 dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
168 if (!dr)
169 return NULL;
170
171 scu = intel_scu_ipc_dev_get();
172 if (!scu) {
173 devres_free(dr);
174 return NULL;
175 }
176
177 dr->scu = scu;
178 devres_add(dev, dr);
179
180 return scu;
181}
182EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
183
184/*
185 * Send ipc command
186 * Command Register (Write Only):
187 * A write to this register results in an interrupt to the SCU core processor
188 * Format:
189 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
190 */
191static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
192{
193 reinit_completion(&scu->cmd_complete);
194 writel(cmd | IPC_IOC, scu->ipc_base);
195}
196
197/*
198 * Write ipc data
199 * IPC Write Buffer (Write Only):
200 * 16-byte buffer for sending data associated with IPC command to
201 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
202 */
203static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
204{
205 writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
206}
207
208/*
209 * Status Register (Read Only):
210 * Driver will read this register to get the ready/busy status of the IPC
211 * block and error status of the IPC command that was just processed by SCU
212 * Format:
213 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
214 */
215static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
216{
217 return __raw_readl(scu->ipc_base + IPC_STATUS);
218}
219
220/* Read ipc byte data */
221static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
222{
223 return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
224}
225
226/* Read ipc u32 data */
227static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
228{
229 return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
230}
231
232/* Wait till scu status is busy */
233static inline int busy_loop(struct intel_scu_ipc_dev *scu)
234{
235 u8 status;
236 int err;
237
238 err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
239 100, jiffies_to_usecs(IPC_TIMEOUT));
240 if (err)
241 return err;
242
243 return (status & IPC_STATUS_ERR) ? -EIO : 0;
244}
245
246/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
247static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
248{
249 int status;
250
251 wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
252
253 status = ipc_read_status(scu);
254 if (status & IPC_STATUS_BUSY)
255 return -ETIMEDOUT;
256
257 if (status & IPC_STATUS_ERR)
258 return -EIO;
259
260 return 0;
261}
262
263static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
264{
265 return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
266}
267
268static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
269{
270 u8 status;
271
272 if (!scu)
273 scu = ipcdev;
274 if (!scu)
275 return ERR_PTR(-ENODEV);
276
277 status = ipc_read_status(scu);
278 if (status & IPC_STATUS_BUSY) {
279 dev_dbg(&scu->dev, "device is busy\n");
280 return ERR_PTR(-EBUSY);
281 }
282
283 return scu;
284}
285
286/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
287static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
288 u32 count, u32 op, u32 id)
289{
290 int nc;
291 u32 offset = 0;
292 int err;
293 u8 cbuf[IPC_WWBUF_SIZE];
294 u32 *wbuf = (u32 *)&cbuf;
295
296 memset(cbuf, 0, sizeof(cbuf));
297
298 mutex_lock(&ipclock);
299 scu = intel_scu_ipc_get(scu);
300 if (IS_ERR(scu)) {
301 mutex_unlock(&ipclock);
302 return PTR_ERR(scu);
303 }
304
305 for (nc = 0; nc < count; nc++, offset += 2) {
306 cbuf[offset] = addr[nc];
307 cbuf[offset + 1] = addr[nc] >> 8;
308 }
309
310 if (id == IPC_CMD_PCNTRL_R) {
311 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
312 ipc_data_writel(scu, wbuf[nc], offset);
313 ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
314 } else if (id == IPC_CMD_PCNTRL_W) {
315 for (nc = 0; nc < count; nc++, offset += 1)
316 cbuf[offset] = data[nc];
317 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
318 ipc_data_writel(scu, wbuf[nc], offset);
319 ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
320 } else if (id == IPC_CMD_PCNTRL_M) {
321 cbuf[offset] = data[0];
322 cbuf[offset + 1] = data[1];
323 ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
324 ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
325 }
326
327 err = intel_scu_ipc_check_status(scu);
328 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
329 /* Workaround: values are read as 0 without memcpy_fromio */
330 memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
331 for (nc = 0; nc < count; nc++)
332 data[nc] = ipc_data_readb(scu, nc);
333 }
334 mutex_unlock(&ipclock);
335 return err;
336}
337
338/**
339 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
340 * @scu: Optional SCU IPC instance
341 * @addr: Register on SCU
342 * @data: Return pointer for read byte
343 *
344 * Read a single register. Returns %0 on success or an error code. All
345 * locking between SCU accesses is handled for the caller.
346 *
347 * This function may sleep.
348 */
349int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
350{
351 return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
352}
353EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
354
355/**
356 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
357 * @scu: Optional SCU IPC instance
358 * @addr: Register on SCU
359 * @data: Byte to write
360 *
361 * Write a single register. Returns %0 on success or an error code. All
362 * locking between SCU accesses is handled for the caller.
363 *
364 * This function may sleep.
365 */
366int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
367{
368 return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
369}
370EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
371
372/**
373 * intel_scu_ipc_dev_readv() - Read a set of registers
374 * @scu: Optional SCU IPC instance
375 * @addr: Register list
376 * @data: Bytes to return
377 * @len: Length of array
378 *
379 * Read registers. Returns %0 on success or an error code. All locking
380 * between SCU accesses is handled for the caller.
381 *
382 * The largest array length permitted by the hardware is 5 items.
383 *
384 * This function may sleep.
385 */
386int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
387 size_t len)
388{
389 return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
390}
391EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
392
393/**
394 * intel_scu_ipc_dev_writev() - Write a set of registers
395 * @scu: Optional SCU IPC instance
396 * @addr: Register list
397 * @data: Bytes to write
398 * @len: Length of array
399 *
400 * Write registers. Returns %0 on success or an error code. All locking
401 * between SCU accesses is handled for the caller.
402 *
403 * The largest array length permitted by the hardware is 5 items.
404 *
405 * This function may sleep.
406 */
407int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
408 size_t len)
409{
410 return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
411}
412EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
413
414/**
415 * intel_scu_ipc_dev_update() - Update a register
416 * @scu: Optional SCU IPC instance
417 * @addr: Register address
418 * @data: Bits to update
419 * @mask: Mask of bits to update
420 *
421 * Read-modify-write power control unit register. The first data argument
422 * must be register value and second is mask value mask is a bitmap that
423 * indicates which bits to update. %0 = masked. Don't modify this bit, %1 =
424 * modify this bit. returns %0 on success or an error code.
425 *
426 * This function may sleep. Locking between SCU accesses is handled
427 * for the caller.
428 */
429int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
430 u8 mask)
431{
432 u8 tmp[2] = { data, mask };
433 return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
434}
435EXPORT_SYMBOL(intel_scu_ipc_dev_update);
436
437/**
438 * intel_scu_ipc_dev_simple_command() - Send a simple command
439 * @scu: Optional SCU IPC instance
440 * @cmd: Command
441 * @sub: Sub type
442 *
443 * Issue a simple command to the SCU. Do not use this interface if you must
444 * then access data as any data values may be overwritten by another SCU
445 * access by the time this function returns.
446 *
447 * This function may sleep. Locking for SCU accesses is handled for the
448 * caller.
449 */
450int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
451 int sub)
452{
453 u32 cmdval;
454 int err;
455
456 mutex_lock(&ipclock);
457 scu = intel_scu_ipc_get(scu);
458 if (IS_ERR(scu)) {
459 mutex_unlock(&ipclock);
460 return PTR_ERR(scu);
461 }
462
463 cmdval = sub << 12 | cmd;
464 ipc_command(scu, cmdval);
465 err = intel_scu_ipc_check_status(scu);
466 mutex_unlock(&ipclock);
467 if (err)
468 dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
469 return err;
470}
471EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
472
473/**
474 * intel_scu_ipc_dev_command_with_size() - Command with data
475 * @scu: Optional SCU IPC instance
476 * @cmd: Command
477 * @sub: Sub type
478 * @in: Input data
479 * @inlen: Input length in bytes
480 * @size: Input size written to the IPC command register in whatever
481 * units (dword, byte) the particular firmware requires. Normally
482 * should be the same as @inlen.
483 * @out: Output data
484 * @outlen: Output length in bytes
485 *
486 * Issue a command to the SCU which involves data transfers. Do the
487 * data copies under the lock but leave it for the caller to interpret.
488 */
489int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
490 int sub, const void *in, size_t inlen,
491 size_t size, void *out, size_t outlen)
492{
493 size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
494 size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
495 u32 cmdval, inbuf[4] = {};
496 int i, err;
497
498 if (inbuflen > 4 || outbuflen > 4)
499 return -EINVAL;
500
501 mutex_lock(&ipclock);
502 scu = intel_scu_ipc_get(scu);
503 if (IS_ERR(scu)) {
504 mutex_unlock(&ipclock);
505 return PTR_ERR(scu);
506 }
507
508 memcpy(inbuf, in, inlen);
509 for (i = 0; i < inbuflen; i++)
510 ipc_data_writel(scu, inbuf[i], 4 * i);
511
512 cmdval = (size << 16) | (sub << 12) | cmd;
513 ipc_command(scu, cmdval);
514 err = intel_scu_ipc_check_status(scu);
515
516 if (!err) {
517 u32 outbuf[4] = {};
518
519 for (i = 0; i < outbuflen; i++)
520 outbuf[i] = ipc_data_readl(scu, 4 * i);
521
522 memcpy(out, outbuf, outlen);
523 }
524
525 mutex_unlock(&ipclock);
526 if (err)
527 dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
528 return err;
529}
530EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
531
532/*
533 * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
534 * When ioc bit is set to 1, caller api must wait for interrupt handler called
535 * which in turn unlocks the caller api. Currently this is not used
536 *
537 * This is edge triggered so we need take no action to clear anything
538 */
539static irqreturn_t ioc(int irq, void *dev_id)
540{
541 struct intel_scu_ipc_dev *scu = dev_id;
542 int status = ipc_read_status(scu);
543
544 writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
545 complete(&scu->cmd_complete);
546
547 return IRQ_HANDLED;
548}
549
550static void intel_scu_ipc_release(struct device *dev)
551{
552 struct intel_scu_ipc_dev *scu;
553
554 scu = container_of(dev, struct intel_scu_ipc_dev, dev);
555 if (scu->irq > 0)
556 free_irq(scu->irq, scu);
557 iounmap(scu->ipc_base);
558 release_mem_region(scu->mem.start, resource_size(&scu->mem));
559 kfree(scu);
560}
561
562/**
563 * __intel_scu_ipc_register() - Register SCU IPC device
564 * @parent: Parent device
565 * @scu_data: Data used to configure SCU IPC
566 * @owner: Module registering the SCU IPC device
567 *
568 * Call this function to register SCU IPC mechanism under @parent.
569 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
570 * failure. The caller may use the returned instance if it needs to do
571 * SCU IPC calls itself.
572 */
573struct intel_scu_ipc_dev *
574__intel_scu_ipc_register(struct device *parent,
575 const struct intel_scu_ipc_data *scu_data,
576 struct module *owner)
577{
578 int err;
579 struct intel_scu_ipc_dev *scu;
580 void __iomem *ipc_base;
581
582 mutex_lock(&ipclock);
583 /* We support only one IPC */
584 if (ipcdev) {
585 err = -EBUSY;
586 goto err_unlock;
587 }
588
589 scu = kzalloc(sizeof(*scu), GFP_KERNEL);
590 if (!scu) {
591 err = -ENOMEM;
592 goto err_unlock;
593 }
594
595 scu->owner = owner;
596 scu->dev.parent = parent;
597 scu->dev.class = &intel_scu_ipc_class;
598 scu->dev.release = intel_scu_ipc_release;
599
600 if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
601 "intel_scu_ipc")) {
602 err = -EBUSY;
603 goto err_free;
604 }
605
606 ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
607 if (!ipc_base) {
608 err = -ENOMEM;
609 goto err_release;
610 }
611
612 scu->ipc_base = ipc_base;
613 scu->mem = scu_data->mem;
614 scu->irq = scu_data->irq;
615 init_completion(&scu->cmd_complete);
616
617 if (scu->irq > 0) {
618 err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
619 if (err)
620 goto err_unmap;
621 }
622
623 /*
624 * After this point intel_scu_ipc_release() takes care of
625 * releasing the SCU IPC resources once refcount drops to zero.
626 */
627 dev_set_name(&scu->dev, "intel_scu_ipc");
628 err = device_register(&scu->dev);
629 if (err) {
630 put_device(&scu->dev);
631 goto err_unlock;
632 }
633
634 /* Assign device at last */
635 ipcdev = scu;
636 mutex_unlock(&ipclock);
637
638 return scu;
639
640err_unmap:
641 iounmap(ipc_base);
642err_release:
643 release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
644err_free:
645 kfree(scu);
646err_unlock:
647 mutex_unlock(&ipclock);
648
649 return ERR_PTR(err);
650}
651EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
652
653/**
654 * intel_scu_ipc_unregister() - Unregister SCU IPC
655 * @scu: SCU IPC handle
656 *
657 * This unregisters the SCU IPC device and releases the acquired
658 * resources once the refcount goes to zero.
659 */
660void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
661{
662 mutex_lock(&ipclock);
663 if (!WARN_ON(!ipcdev)) {
664 ipcdev = NULL;
665 device_unregister(&scu->dev);
666 }
667 mutex_unlock(&ipclock);
668}
669EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
670
671static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
672{
673 struct intel_scu_ipc_devres *dr = res;
674 struct intel_scu_ipc_dev *scu = dr->scu;
675
676 intel_scu_ipc_unregister(scu);
677}
678
679/**
680 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
681 * @parent: Parent device
682 * @scu_data: Data used to configure SCU IPC
683 * @owner: Module registering the SCU IPC device
684 *
685 * Call this function to register managed SCU IPC mechanism under
686 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
687 * case of failure. The caller may use the returned instance if it needs
688 * to do SCU IPC calls itself.
689 */
690struct intel_scu_ipc_dev *
691__devm_intel_scu_ipc_register(struct device *parent,
692 const struct intel_scu_ipc_data *scu_data,
693 struct module *owner)
694{
695 struct intel_scu_ipc_devres *dr;
696 struct intel_scu_ipc_dev *scu;
697
698 dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
699 if (!dr)
700 return NULL;
701
702 scu = __intel_scu_ipc_register(parent, scu_data, owner);
703 if (IS_ERR(scu)) {
704 devres_free(dr);
705 return scu;
706 }
707
708 dr->scu = scu;
709 devres_add(parent, dr);
710
711 return scu;
712}
713EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
714
715static int __init intel_scu_ipc_init(void)
716{
717 return class_register(&intel_scu_ipc_class);
718}
719subsys_initcall(intel_scu_ipc_init);
720
721static void __exit intel_scu_ipc_exit(void)
722{
723 class_unregister(&intel_scu_ipc_class);
724}
725module_exit(intel_scu_ipc_exit);
1/*
2 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
3 *
4 * (C) Copyright 2008-2010 Intel Corporation
5 * Author: Sreedhara DS (sreedhara.ds@intel.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 *
12 * SCU running in ARC processor communicates with other entity running in IA
13 * core through IPC mechanism which in turn messaging between IA core ad SCU.
14 * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and
15 * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with
16 * IPC-1 Driver provides an API for power control unit registers (e.g. MSIC)
17 * along with other APIs.
18 */
19#include <linux/delay.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/device.h>
23#include <linux/pm.h>
24#include <linux/pci.h>
25#include <linux/interrupt.h>
26#include <linux/sfi.h>
27#include <linux/module.h>
28#include <asm/intel-mid.h>
29#include <asm/intel_scu_ipc.h>
30
31/* IPC defines the following message types */
32#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
33#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
34#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
35#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
36#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
37
38/* Command id associated with message IPCMSG_PCNTRL */
39#define IPC_CMD_PCNTRL_W 0 /* Register write */
40#define IPC_CMD_PCNTRL_R 1 /* Register read */
41#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
42
43/*
44 * IPC register summary
45 *
46 * IPC register blocks are memory mapped at fixed address of 0xFF11C000
47 * To read or write information to the SCU, driver writes to IPC-1 memory
48 * mapped registers (base address 0xFF11C000). The following is the IPC
49 * mechanism
50 *
51 * 1. IA core cDMI interface claims this transaction and converts it to a
52 * Transaction Layer Packet (TLP) message which is sent across the cDMI.
53 *
54 * 2. South Complex cDMI block receives this message and writes it to
55 * the IPC-1 register block, causing an interrupt to the SCU
56 *
57 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
58 * message handler is called within firmware.
59 */
60
61#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
62#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
63#define IPC_IOC 0x100 /* IPC command register IOC bit */
64
65#define PCI_DEVICE_ID_LINCROFT 0x082a
66#define PCI_DEVICE_ID_PENWELL 0x080e
67#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
68#define PCI_DEVICE_ID_TANGIER 0x11a0
69
70/* intel scu ipc driver data*/
71struct intel_scu_ipc_pdata_t {
72 u32 ipc_base;
73 u32 i2c_base;
74 u32 ipc_len;
75 u32 i2c_len;
76 u8 irq_mode;
77};
78
79static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
80 .ipc_base = 0xff11c000,
81 .i2c_base = 0xff12b000,
82 .ipc_len = 0x100,
83 .i2c_len = 0x10,
84 .irq_mode = 0,
85};
86
87/* Penwell and Cloverview */
88static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
89 .ipc_base = 0xff11c000,
90 .i2c_base = 0xff12b000,
91 .ipc_len = 0x100,
92 .i2c_len = 0x10,
93 .irq_mode = 1,
94};
95
96static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
97 .ipc_base = 0xff009000,
98 .i2c_base = 0xff00d000,
99 .ipc_len = 0x100,
100 .i2c_len = 0x10,
101 .irq_mode = 0,
102};
103
104static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
105static void ipc_remove(struct pci_dev *pdev);
106
107struct intel_scu_ipc_dev {
108 struct pci_dev *pdev;
109 void __iomem *ipc_base;
110 void __iomem *i2c_base;
111 struct completion cmd_complete;
112 u8 irq_mode;
113};
114
115static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
116
117static int platform; /* Platform type */
118
119/*
120 * IPC Read Buffer (Read Only):
121 * 16 byte buffer for receiving data from SCU, if IPC command
122 * processing results in response data
123 */
124#define IPC_READ_BUFFER 0x90
125
126#define IPC_I2C_CNTRL_ADDR 0
127#define I2C_DATA_ADDR 0x04
128
129static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
130
131/*
132 * Command Register (Write Only):
133 * A write to this register results in an interrupt to the SCU core processor
134 * Format:
135 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
136 */
137static inline void ipc_command(u32 cmd) /* Send ipc command */
138{
139 if (ipcdev.irq_mode) {
140 reinit_completion(&ipcdev.cmd_complete);
141 writel(cmd | IPC_IOC, ipcdev.ipc_base);
142 }
143 writel(cmd, ipcdev.ipc_base);
144}
145
146/*
147 * IPC Write Buffer (Write Only):
148 * 16-byte buffer for sending data associated with IPC command to
149 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
150 */
151static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
152{
153 writel(data, ipcdev.ipc_base + 0x80 + offset);
154}
155
156/*
157 * Status Register (Read Only):
158 * Driver will read this register to get the ready/busy status of the IPC
159 * block and error status of the IPC command that was just processed by SCU
160 * Format:
161 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
162 */
163
164static inline u8 ipc_read_status(void)
165{
166 return __raw_readl(ipcdev.ipc_base + 0x04);
167}
168
169static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */
170{
171 return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
172}
173
174static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
175{
176 return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
177}
178
179static inline int busy_loop(void) /* Wait till scu status is busy */
180{
181 u32 status = 0;
182 u32 loop_count = 0;
183
184 status = ipc_read_status();
185 while (status & 1) {
186 udelay(1); /* scu processing time is in few u secods */
187 status = ipc_read_status();
188 loop_count++;
189 /* break if scu doesn't reset busy bit after huge retry */
190 if (loop_count > 100000) {
191 dev_err(&ipcdev.pdev->dev, "IPC timed out");
192 return -ETIMEDOUT;
193 }
194 }
195 if ((status >> 1) & 1)
196 return -EIO;
197
198 return 0;
199}
200
201/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
202static inline int ipc_wait_for_interrupt(void)
203{
204 int status;
205
206 if (!wait_for_completion_timeout(&ipcdev.cmd_complete, 3 * HZ)) {
207 struct device *dev = &ipcdev.pdev->dev;
208 dev_err(dev, "IPC timed out\n");
209 return -ETIMEDOUT;
210 }
211
212 status = ipc_read_status();
213
214 if ((status >> 1) & 1)
215 return -EIO;
216
217 return 0;
218}
219
220int intel_scu_ipc_check_status(void)
221{
222 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
223}
224
225/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
226static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
227{
228 int nc;
229 u32 offset = 0;
230 int err;
231 u8 cbuf[IPC_WWBUF_SIZE] = { };
232 u32 *wbuf = (u32 *)&cbuf;
233
234 mutex_lock(&ipclock);
235
236 memset(cbuf, 0, sizeof(cbuf));
237
238 if (ipcdev.pdev == NULL) {
239 mutex_unlock(&ipclock);
240 return -ENODEV;
241 }
242
243 for (nc = 0; nc < count; nc++, offset += 2) {
244 cbuf[offset] = addr[nc];
245 cbuf[offset + 1] = addr[nc] >> 8;
246 }
247
248 if (id == IPC_CMD_PCNTRL_R) {
249 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
250 ipc_data_writel(wbuf[nc], offset);
251 ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op);
252 } else if (id == IPC_CMD_PCNTRL_W) {
253 for (nc = 0; nc < count; nc++, offset += 1)
254 cbuf[offset] = data[nc];
255 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
256 ipc_data_writel(wbuf[nc], offset);
257 ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op);
258 } else if (id == IPC_CMD_PCNTRL_M) {
259 cbuf[offset] = data[0];
260 cbuf[offset + 1] = data[1];
261 ipc_data_writel(wbuf[0], 0); /* Write wbuff */
262 ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
263 }
264
265 err = intel_scu_ipc_check_status();
266 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
267 /* Workaround: values are read as 0 without memcpy_fromio */
268 memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
269 for (nc = 0; nc < count; nc++)
270 data[nc] = ipc_data_readb(nc);
271 }
272 mutex_unlock(&ipclock);
273 return err;
274}
275
276/**
277 * intel_scu_ipc_ioread8 - read a word via the SCU
278 * @addr: register on SCU
279 * @data: return pointer for read byte
280 *
281 * Read a single register. Returns 0 on success or an error code. All
282 * locking between SCU accesses is handled for the caller.
283 *
284 * This function may sleep.
285 */
286int intel_scu_ipc_ioread8(u16 addr, u8 *data)
287{
288 return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
289}
290EXPORT_SYMBOL(intel_scu_ipc_ioread8);
291
292/**
293 * intel_scu_ipc_ioread16 - read a word via the SCU
294 * @addr: register on SCU
295 * @data: return pointer for read word
296 *
297 * Read a register pair. Returns 0 on success or an error code. All
298 * locking between SCU accesses is handled for the caller.
299 *
300 * This function may sleep.
301 */
302int intel_scu_ipc_ioread16(u16 addr, u16 *data)
303{
304 u16 x[2] = {addr, addr + 1 };
305 return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
306}
307EXPORT_SYMBOL(intel_scu_ipc_ioread16);
308
309/**
310 * intel_scu_ipc_ioread32 - read a dword via the SCU
311 * @addr: register on SCU
312 * @data: return pointer for read dword
313 *
314 * Read four registers. Returns 0 on success or an error code. All
315 * locking between SCU accesses is handled for the caller.
316 *
317 * This function may sleep.
318 */
319int intel_scu_ipc_ioread32(u16 addr, u32 *data)
320{
321 u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
322 return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
323}
324EXPORT_SYMBOL(intel_scu_ipc_ioread32);
325
326/**
327 * intel_scu_ipc_iowrite8 - write a byte via the SCU
328 * @addr: register on SCU
329 * @data: byte to write
330 *
331 * Write a single register. Returns 0 on success or an error code. All
332 * locking between SCU accesses is handled for the caller.
333 *
334 * This function may sleep.
335 */
336int intel_scu_ipc_iowrite8(u16 addr, u8 data)
337{
338 return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
339}
340EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
341
342/**
343 * intel_scu_ipc_iowrite16 - write a word via the SCU
344 * @addr: register on SCU
345 * @data: word to write
346 *
347 * Write two registers. Returns 0 on success or an error code. All
348 * locking between SCU accesses is handled for the caller.
349 *
350 * This function may sleep.
351 */
352int intel_scu_ipc_iowrite16(u16 addr, u16 data)
353{
354 u16 x[2] = {addr, addr + 1 };
355 return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
356}
357EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
358
359/**
360 * intel_scu_ipc_iowrite32 - write a dword via the SCU
361 * @addr: register on SCU
362 * @data: dword to write
363 *
364 * Write four registers. Returns 0 on success or an error code. All
365 * locking between SCU accesses is handled for the caller.
366 *
367 * This function may sleep.
368 */
369int intel_scu_ipc_iowrite32(u16 addr, u32 data)
370{
371 u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
372 return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
373}
374EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
375
376/**
377 * intel_scu_ipc_readvv - read a set of registers
378 * @addr: register list
379 * @data: bytes to return
380 * @len: length of array
381 *
382 * Read registers. Returns 0 on success or an error code. All
383 * locking between SCU accesses is handled for the caller.
384 *
385 * The largest array length permitted by the hardware is 5 items.
386 *
387 * This function may sleep.
388 */
389int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
390{
391 return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
392}
393EXPORT_SYMBOL(intel_scu_ipc_readv);
394
395/**
396 * intel_scu_ipc_writev - write a set of registers
397 * @addr: register list
398 * @data: bytes to write
399 * @len: length of array
400 *
401 * Write registers. Returns 0 on success or an error code. All
402 * locking between SCU accesses is handled for the caller.
403 *
404 * The largest array length permitted by the hardware is 5 items.
405 *
406 * This function may sleep.
407 *
408 */
409int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
410{
411 return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
412}
413EXPORT_SYMBOL(intel_scu_ipc_writev);
414
415
416/**
417 * intel_scu_ipc_update_register - r/m/w a register
418 * @addr: register address
419 * @bits: bits to update
420 * @mask: mask of bits to update
421 *
422 * Read-modify-write power control unit register. The first data argument
423 * must be register value and second is mask value
424 * mask is a bitmap that indicates which bits to update.
425 * 0 = masked. Don't modify this bit, 1 = modify this bit.
426 * returns 0 on success or an error code.
427 *
428 * This function may sleep. Locking between SCU accesses is handled
429 * for the caller.
430 */
431int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
432{
433 u8 data[2] = { bits, mask };
434 return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
435}
436EXPORT_SYMBOL(intel_scu_ipc_update_register);
437
438/**
439 * intel_scu_ipc_simple_command - send a simple command
440 * @cmd: command
441 * @sub: sub type
442 *
443 * Issue a simple command to the SCU. Do not use this interface if
444 * you must then access data as any data values may be overwritten
445 * by another SCU access by the time this function returns.
446 *
447 * This function may sleep. Locking for SCU accesses is handled for
448 * the caller.
449 */
450int intel_scu_ipc_simple_command(int cmd, int sub)
451{
452 int err;
453
454 mutex_lock(&ipclock);
455 if (ipcdev.pdev == NULL) {
456 mutex_unlock(&ipclock);
457 return -ENODEV;
458 }
459 ipc_command(sub << 12 | cmd);
460 err = intel_scu_ipc_check_status();
461 mutex_unlock(&ipclock);
462 return err;
463}
464EXPORT_SYMBOL(intel_scu_ipc_simple_command);
465
466/**
467 * intel_scu_ipc_command - command with data
468 * @cmd: command
469 * @sub: sub type
470 * @in: input data
471 * @inlen: input length in dwords
472 * @out: output data
473 * @outlein: output length in dwords
474 *
475 * Issue a command to the SCU which involves data transfers. Do the
476 * data copies under the lock but leave it for the caller to interpret
477 */
478
479int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
480 u32 *out, int outlen)
481{
482 int i, err;
483
484 mutex_lock(&ipclock);
485 if (ipcdev.pdev == NULL) {
486 mutex_unlock(&ipclock);
487 return -ENODEV;
488 }
489
490 for (i = 0; i < inlen; i++)
491 ipc_data_writel(*in++, 4 * i);
492
493 ipc_command((inlen << 16) | (sub << 12) | cmd);
494 err = intel_scu_ipc_check_status();
495
496 if (!err) {
497 for (i = 0; i < outlen; i++)
498 *out++ = ipc_data_readl(4 * i);
499 }
500
501 mutex_unlock(&ipclock);
502 return err;
503}
504EXPORT_SYMBOL(intel_scu_ipc_command);
505
506/*I2C commands */
507#define IPC_I2C_WRITE 1 /* I2C Write command */
508#define IPC_I2C_READ 2 /* I2C Read command */
509
510/**
511 * intel_scu_ipc_i2c_cntrl - I2C read/write operations
512 * @addr: I2C address + command bits
513 * @data: data to read/write
514 *
515 * Perform an an I2C read/write operation via the SCU. All locking is
516 * handled for the caller. This function may sleep.
517 *
518 * Returns an error code or 0 on success.
519 *
520 * This has to be in the IPC driver for the locking.
521 */
522int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
523{
524 u32 cmd = 0;
525
526 mutex_lock(&ipclock);
527 if (ipcdev.pdev == NULL) {
528 mutex_unlock(&ipclock);
529 return -ENODEV;
530 }
531 cmd = (addr >> 24) & 0xFF;
532 if (cmd == IPC_I2C_READ) {
533 writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
534 /* Write not getting updated without delay */
535 mdelay(1);
536 *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
537 } else if (cmd == IPC_I2C_WRITE) {
538 writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
539 mdelay(1);
540 writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
541 } else {
542 dev_err(&ipcdev.pdev->dev,
543 "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
544
545 mutex_unlock(&ipclock);
546 return -EIO;
547 }
548 mutex_unlock(&ipclock);
549 return 0;
550}
551EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
552
553/*
554 * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
555 * When ioc bit is set to 1, caller api must wait for interrupt handler called
556 * which in turn unlocks the caller api. Currently this is not used
557 *
558 * This is edge triggered so we need take no action to clear anything
559 */
560static irqreturn_t ioc(int irq, void *dev_id)
561{
562 if (ipcdev.irq_mode)
563 complete(&ipcdev.cmd_complete);
564
565 return IRQ_HANDLED;
566}
567
568/**
569 * ipc_probe - probe an Intel SCU IPC
570 * @dev: the PCI device matching
571 * @id: entry in the match table
572 *
573 * Enable and install an intel SCU IPC. This appears in the PCI space
574 * but uses some hard coded addresses as well.
575 */
576static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
577{
578 int err;
579 struct intel_scu_ipc_pdata_t *pdata;
580 resource_size_t pci_resource;
581
582 if (ipcdev.pdev) /* We support only one SCU */
583 return -EBUSY;
584
585 pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
586
587 ipcdev.pdev = pci_dev_get(dev);
588 ipcdev.irq_mode = pdata->irq_mode;
589
590 err = pci_enable_device(dev);
591 if (err)
592 return err;
593
594 err = pci_request_regions(dev, "intel_scu_ipc");
595 if (err)
596 return err;
597
598 pci_resource = pci_resource_start(dev, 0);
599 if (!pci_resource)
600 return -ENOMEM;
601
602 init_completion(&ipcdev.cmd_complete);
603
604 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
605 return -EBUSY;
606
607 ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
608 if (!ipcdev.ipc_base)
609 return -ENOMEM;
610
611 ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
612 if (!ipcdev.i2c_base) {
613 iounmap(ipcdev.ipc_base);
614 return -ENOMEM;
615 }
616
617 intel_scu_devices_create();
618
619 return 0;
620}
621
622/**
623 * ipc_remove - remove a bound IPC device
624 * @pdev: PCI device
625 *
626 * In practice the SCU is not removable but this function is also
627 * called for each device on a module unload or cleanup which is the
628 * path that will get used.
629 *
630 * Free up the mappings and release the PCI resources
631 */
632static void ipc_remove(struct pci_dev *pdev)
633{
634 free_irq(pdev->irq, &ipcdev);
635 pci_release_regions(pdev);
636 pci_dev_put(ipcdev.pdev);
637 iounmap(ipcdev.ipc_base);
638 iounmap(ipcdev.i2c_base);
639 ipcdev.pdev = NULL;
640 intel_scu_devices_destroy();
641}
642
643static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
644 {
645 PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
646 (kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
647 }, {
648 PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
649 (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
650 }, {
651 PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
652 (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
653 }, {
654 PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
655 (kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
656 }, {
657 0,
658 }
659};
660MODULE_DEVICE_TABLE(pci, pci_ids);
661
662static struct pci_driver ipc_driver = {
663 .name = "intel_scu_ipc",
664 .id_table = pci_ids,
665 .probe = ipc_probe,
666 .remove = ipc_remove,
667};
668
669
670static int __init intel_scu_ipc_init(void)
671{
672 platform = intel_mid_identify_cpu();
673 if (platform == 0)
674 return -ENODEV;
675 return pci_register_driver(&ipc_driver);
676}
677
678static void __exit intel_scu_ipc_exit(void)
679{
680 pci_unregister_driver(&ipc_driver);
681}
682
683MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
684MODULE_DESCRIPTION("Intel SCU IPC driver");
685MODULE_LICENSE("GPL");
686
687module_init(intel_scu_ipc_init);
688module_exit(intel_scu_ipc_exit);