/*
 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
 *
 * (C) Copyright 2008-2010,2015 Intel Corporation
 * Author: Sreedhara DS (sreedhara.ds@intel.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * The SCU running on the ARC processor communicates with other entities
 * running on the IA core through an IPC mechanism, which in turn handles
 * the messaging between the IA core and the SCU. The SCU has two IPC
 * mechanisms, IPC-1 and IPC-2. IPC-1 is used between IA32 and the SCU,
 * whereas IPC-2 is used between the P-Unit and the SCU. This driver deals
 * with IPC-1 and provides an API for power control unit registers (e.g.
 * MSIC) along with other APIs.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sfi.h>
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>

/* IPC defines the following message types */
#define IPCMSG_WATCHDOG_TIMER	0xF8	/* Set Kernel Watchdog Threshold */
#define IPCMSG_BATTERY		0xEF	/* Coulomb Counter Accumulator */
#define IPCMSG_FW_UPDATE	0xFE	/* Firmware update */
#define IPCMSG_PCNTRL		0xFF	/* Power controller unit read/write */
#define IPCMSG_FW_REVISION	0xF4	/* Get firmware revision */

/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W	0	/* Register write */
#define IPC_CMD_PCNTRL_R	1	/* Register read */
#define IPC_CMD_PCNTRL_M	2	/* Register read-modify-write */

/*
 * IPC register summary
 *
 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
 * To read or write information to the SCU, the driver writes to the IPC-1
 * memory mapped registers. The IPC mechanism works as follows:
 *
 * 1. IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU.
 *
 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 *    message handler is called within firmware.
 */

#define IPC_WWBUF_SIZE		20	/* IPC Write buffer Size */
#define IPC_RWBUF_SIZE		20	/* IPC Read buffer Size */
#define IPC_IOC			0x100	/* IPC command register IOC bit */

#define PCI_DEVICE_ID_LINCROFT		0x082a
#define PCI_DEVICE_ID_PENWELL		0x080e
#define PCI_DEVICE_ID_CLOVERVIEW	0x08ea
#define PCI_DEVICE_ID_TANGIER		0x11a0

/* intel scu ipc driver data */
struct intel_scu_ipc_pdata_t {
	u32 i2c_base;
	u32 i2c_len;
	u8 irq_mode;
};

static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
	.i2c_base = 0xff12b000,
	.i2c_len = 0x10,
	.irq_mode = 0,
};

/* Penwell and Cloverview */
static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
	.i2c_base = 0xff12b000,
	.i2c_len = 0x10,
	.irq_mode = 1,
};

static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
	.i2c_base = 0xff00d000,
	.i2c_len = 0x10,
	.irq_mode = 0,
};

struct intel_scu_ipc_dev {
	struct device *dev;
	void __iomem *ipc_base;
	void __iomem *i2c_base;
	struct completion cmd_complete;
	u8 irq_mode;
};

static struct intel_scu_ipc_dev ipcdev; /* Only one for now */

/*
 * IPC Read Buffer (Read Only):
 * 16 byte buffer for receiving data from SCU, if IPC command
 * processing results in response data
 */
#define IPC_READ_BUFFER		0x90

#define IPC_I2C_CNTRL_ADDR	0
#define I2C_DATA_ADDR		0x04

static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */

/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
	if (scu->irq_mode) {
		reinit_completion(&scu->cmd_complete);
		writel(cmd | IPC_IOC, scu->ipc_base);
	}
	writel(cmd, scu->ipc_base);
}
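
/*
 * Worked example of the command word layout above (illustrative, derived
 * from pwr_reg_rdwr() below): a PCNTRL read of one register is issued as
 *
 *	(count * 2) << 16 | IPC_CMD_PCNTRL_R << 12 | IPCMSG_PCNTRL
 *	= 2 << 16 | 1 << 12 | 0xFF = 0x210FF
 *
 * i.e. size = 2 bytes of register address in the write buffer, command
 * id = 1 (register read), ioc = 0 and command = 0xFF.
 */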

/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
	writel(data, scu->ipc_base + 0x80 + offset);
}

/*
 * Status Register (Read Only):
 * Driver will read this register to get the ready/busy status of the IPC
 * block and error status of the IPC command that was just processed by SCU
 * Format:
 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
 */
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
	return __raw_readl(scu->ipc_base + 0x04);
}

/* Read ipc byte data */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}

/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}

/* Wait until the SCU is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
	u32 status = ipc_read_status(scu);
	u32 loop_count = 100000;

	/* break if the SCU doesn't clear the busy bit even after many retries */
	while ((status & BIT(0)) && --loop_count) {
		udelay(1); /* SCU processing time is a few microseconds */
		status = ipc_read_status(scu);
	}

	if (status & BIT(0)) {
		dev_err(scu->dev, "IPC timed out");
		return -ETIMEDOUT;
	}

	if (status & BIT(1))
		return -EIO;

	return 0;
}

/* Wait until the IPC IOC interrupt is received or time out after 3 seconds */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
	int status;

	if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
		dev_err(scu->dev, "IPC timed out\n");
		return -ETIMEDOUT;
	}

	status = ipc_read_status(scu);
	if (status & BIT(1))
		return -EIO;

	return 0;
}

static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
	return scu->irq_mode ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}

/* Read/Write power control (PMIC in Langwell, MSIC in Penwell) registers */
static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
{
	struct intel_scu_ipc_dev *scu = &ipcdev;
	int nc;
	u32 offset = 0;
	int err;
	u8 cbuf[IPC_WWBUF_SIZE];
	u32 *wbuf = (u32 *)&cbuf;

	memset(cbuf, 0, sizeof(cbuf));

	mutex_lock(&ipclock);

	if (scu->dev == NULL) {
		mutex_unlock(&ipclock);
		return -ENODEV;
	}

	for (nc = 0; nc < count; nc++, offset += 2) {
		cbuf[offset] = addr[nc];
		cbuf[offset + 1] = addr[nc] >> 8;
	}

	if (id == IPC_CMD_PCNTRL_R) {
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_W) {
		for (nc = 0; nc < count; nc++, offset += 1)
			cbuf[offset] = data[nc];
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_M) {
		cbuf[offset] = data[0];
		cbuf[offset + 1] = data[1];
		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
	}

	err = intel_scu_ipc_check_status(scu);
	if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
		/* Workaround: values are read as 0 without memcpy_fromio */
		memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
		for (nc = 0; nc < count; nc++)
			data[nc] = ipc_data_readb(scu, nc);
	}
	mutex_unlock(&ipclock);
	return err;
}

/**
 * intel_scu_ipc_ioread8 - read a byte via the SCU
 * @addr: register on SCU
 * @data: return pointer for read byte
 *
 * Read a single register. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_ioread8(u16 addr, u8 *data)
{
	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_ioread8);
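
/*
 * Usage sketch (hypothetical caller, not part of this driver): read a single
 * PMIC/MSIC register over IPC-1. The register address 0x1ff is illustrative.
 *
 *	u8 val;
 *	int ret;
 *
 *	ret = intel_scu_ipc_ioread8(0x1ff, &val);
 *	if (ret)
 *		return ret;
 */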

/**
 * intel_scu_ipc_ioread16 - read a word via the SCU
 * @addr: register on SCU
 * @data: return pointer for read word
 *
 * Read a register pair. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_ioread16(u16 addr, u16 *data)
{
	u16 x[2] = {addr, addr + 1};
	return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_ioread16);

/**
 * intel_scu_ipc_ioread32 - read a dword via the SCU
 * @addr: register on SCU
 * @data: return pointer for read dword
 *
 * Read four registers. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_ioread32(u16 addr, u32 *data)
{
	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
	return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_ioread32);

/**
 * intel_scu_ipc_iowrite8 - write a byte via the SCU
 * @addr: register on SCU
 * @data: byte to write
 *
 * Write a single register. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_iowrite8(u16 addr, u8 data)
{
	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_iowrite8);

/**
 * intel_scu_ipc_iowrite16 - write a word via the SCU
 * @addr: register on SCU
 * @data: word to write
 *
 * Write two registers. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_iowrite16(u16 addr, u16 data)
{
	u16 x[2] = {addr, addr + 1};
	return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_iowrite16);

/**
 * intel_scu_ipc_iowrite32 - write a dword via the SCU
 * @addr: register on SCU
 * @data: dword to write
 *
 * Write four registers. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_iowrite32(u16 addr, u32 data)
{
	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_iowrite32);

/**
 * intel_scu_ipc_readv - read a set of registers
 * @addr: register list
 * @data: bytes to return
 * @len: length of array
 *
 * Read registers. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
{
	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_readv);

/**
 * intel_scu_ipc_writev - write a set of registers
 * @addr: register list
 * @data: bytes to write
 * @len: length of array
 *
 * Write registers. Returns 0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
{
	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_writev);

/**
 * intel_scu_ipc_update_register - r/m/w a register
 * @addr: register address
 * @bits: bits to update
 * @mask: mask of bits to update
 *
 * Read-modify-write a power control unit register. @bits holds the new
 * register value and @mask is a bitmap that indicates which bits to update:
 * 0 = masked, don't modify this bit; 1 = modify this bit.
 * Returns 0 on success or an error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
{
	u8 data[2] = { bits, mask };
	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_update_register);
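
/*
 * Usage sketch (hypothetical caller): set bit 0 of a power control unit
 * register while leaving the other bits untouched. The address 0x1ff is
 * illustrative only.
 *
 *	int ret;
 *
 *	ret = intel_scu_ipc_update_register(0x1ff, 0x01, 0x01);
 *	if (ret)
 *		return ret;
 */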

/**
 * intel_scu_ipc_simple_command - send a simple command
 * @cmd: command
 * @sub: sub type
 *
 * Issue a simple command to the SCU. Do not use this interface if
 * you must then access data as any data values may be overwritten
 * by another SCU access by the time this function returns.
 *
 * This function may sleep. Locking for SCU accesses is handled for
 * the caller.
 */
int intel_scu_ipc_simple_command(int cmd, int sub)
{
	struct intel_scu_ipc_dev *scu = &ipcdev;
	int err;

	mutex_lock(&ipclock);
	if (scu->dev == NULL) {
		mutex_unlock(&ipclock);
		return -ENODEV;
	}
	ipc_command(scu, sub << 12 | cmd);
	err = intel_scu_ipc_check_status(scu);
	mutex_unlock(&ipclock);
	return err;
}
EXPORT_SYMBOL(intel_scu_ipc_simple_command);

/**
 * intel_scu_ipc_command - command with data
 * @cmd: command
 * @sub: sub type
 * @in: input data
 * @inlen: input length in dwords
 * @out: output data
 * @outlen: output length in dwords
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 */
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
			  u32 *out, int outlen)
{
	struct intel_scu_ipc_dev *scu = &ipcdev;
	int i, err;

	mutex_lock(&ipclock);
	if (scu->dev == NULL) {
		mutex_unlock(&ipclock);
		return -ENODEV;
	}

	for (i = 0; i < inlen; i++)
		ipc_data_writel(scu, *in++, 4 * i);

	ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
	err = intel_scu_ipc_check_status(scu);

	if (!err) {
		for (i = 0; i < outlen; i++)
			*out++ = ipc_data_readl(scu, 4 * i);
	}

	mutex_unlock(&ipclock);
	return err;
}
EXPORT_SYMBOL(intel_scu_ipc_command);
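
/*
 * Usage sketch (hypothetical caller): query the SCU firmware revision.
 * IPCMSG_FW_REVISION is defined above; treating the response as four dwords
 * is an assumption made for illustration only.
 *
 *	u32 rev[4];
 *	int ret;
 *
 *	ret = intel_scu_ipc_command(IPCMSG_FW_REVISION, 0, NULL, 0, rev, 4);
 *	if (ret)
 *		return ret;
 */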

/* I2C commands */
#define IPC_I2C_WRITE	1 /* I2C Write command */
#define IPC_I2C_READ	2 /* I2C Read command */

/**
 * intel_scu_ipc_i2c_cntrl - I2C read/write operations
 * @addr: I2C address + command bits
 * @data: data to read/write
 *
 * Perform an I2C read/write operation via the SCU. All locking is
 * handled for the caller. This function may sleep.
 *
 * Returns an error code or 0 on success.
 *
 * This has to be in the IPC driver for the locking.
 */
int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
{
	struct intel_scu_ipc_dev *scu = &ipcdev;
	u32 cmd = 0;

	mutex_lock(&ipclock);
	if (scu->dev == NULL) {
		mutex_unlock(&ipclock);
		return -ENODEV;
	}
	cmd = (addr >> 24) & 0xFF;
	if (cmd == IPC_I2C_READ) {
		writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
		/* Write not getting updated without delay */
		mdelay(1);
		*data = readl(scu->i2c_base + I2C_DATA_ADDR);
	} else if (cmd == IPC_I2C_WRITE) {
		writel(*data, scu->i2c_base + I2C_DATA_ADDR);
		mdelay(1);
		writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
	} else {
		dev_err(scu->dev,
			"intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);

		mutex_unlock(&ipclock);
		return -EIO;
	}
	mutex_unlock(&ipclock);
	return 0;
}
EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);

/*
 * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is set.
 * In that case the caller must wait for this interrupt handler to be invoked,
 * which in turn completes the caller's wait. Currently this is not used.
 *
 * This is edge triggered, so no action is needed to clear anything.
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
	struct intel_scu_ipc_dev *scu = dev_id;

	if (scu->irq_mode)
		complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}

/**
 * ipc_probe - probe an Intel SCU IPC
 * @pdev: the PCI device matching
 * @id: entry in the match table
 *
 * Enable and install an Intel SCU IPC. This appears in the PCI space
 * but uses some hard coded addresses as well.
 */
static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int platform;		/* Platform type */
	int err;
	struct intel_scu_ipc_dev *scu = &ipcdev;
	struct intel_scu_ipc_pdata_t *pdata;

	platform = intel_mid_identify_cpu();
	if (platform == 0)
		return -ENODEV;

	if (scu->dev)		/* We support only one SCU */
		return -EBUSY;

	pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;

	scu->dev = &pdev->dev;
	scu->irq_mode = pdata->irq_mode;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (err)
		return err;

	init_completion(&scu->cmd_complete);

	err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
			       scu);
	if (err)
		return err;

	scu->ipc_base = pcim_iomap_table(pdev)[0];

	scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
	if (!scu->i2c_base)
		return -ENOMEM;

	intel_scu_devices_create();

	pci_set_drvdata(pdev, scu);
	return 0;
}

static const struct pci_device_id pci_ids[] = {
	{
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
		(kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
	}, {
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
		(kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
	}, {
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
		(kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
	}, {
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
		(kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
	}, {
		0,
	}
};

static struct pci_driver ipc_driver = {
	.driver = {
		.suppress_bind_attrs = true,
	},
	.name = "intel_scu_ipc",
	.id_table = pci_ids,
	.probe = ipc_probe,
};
builtin_pci_driver(ipc_driver);

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Intel SCU IPC mechanism
 *
 * (C) Copyright 2008-2010,2015 Intel Corporation
 * Author: Sreedhara DS (sreedhara.ds@intel.com)
 *
 * The SCU running on the ARC processor communicates with other entities
 * running on the IA core through an IPC mechanism, which in turn handles
 * the messaging between the IA core and the SCU. The SCU has two IPC
 * mechanisms, IPC-1 and IPC-2. IPC-1 is used between IA32 and the SCU,
 * whereas IPC-2 is used between the P-Unit and the SCU. This driver deals
 * with IPC-1 and provides an API for power control unit registers (e.g.
 * MSIC) along with other APIs.
 */

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/platform_data/x86/intel_scu_ipc.h>

/* IPC defines the following message types */
#define IPCMSG_PCNTRL		0xff /* Power controller unit read/write */

/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W	0 /* Register write */
#define IPC_CMD_PCNTRL_R	1 /* Register read */
#define IPC_CMD_PCNTRL_M	2 /* Register read-modify-write */

/*
 * IPC register summary
 *
 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
 * To read or write information to the SCU, the driver writes to the IPC-1
 * memory mapped registers. The IPC mechanism works as follows:
 *
 * 1. IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU.
 *
 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 *    message handler is called within firmware.
 */

#define IPC_WWBUF_SIZE		20	/* IPC Write buffer Size */
#define IPC_RWBUF_SIZE		20	/* IPC Read buffer Size */
#define IPC_IOC			0x100	/* IPC command register IOC bit */

struct intel_scu_ipc_dev {
	struct device dev;
	struct module *owner;
	void __iomem *ipc_base;
	struct completion cmd_complete;

	struct intel_scu_ipc_data data;
};

#define IPC_STATUS		0x04
#define IPC_STATUS_IRQ		BIT(2)
#define IPC_STATUS_ERR		BIT(1)
#define IPC_STATUS_BUSY		BIT(0)

/*
 * IPC Write/Read Buffers:
 * 16 byte buffer for sending and receiving data to and from SCU.
 */
#define IPC_WRITE_BUFFER	0x80
#define IPC_READ_BUFFER		0x90

/* Timeout in jiffies */
#define IPC_TIMEOUT		(10 * HZ)

static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */

static struct class intel_scu_ipc_class = {
	.name = "intel_scu_ipc",
};

/**
 * intel_scu_ipc_dev_get() - Get SCU IPC instance
 *
 * The recommended new API takes SCU IPC instance as parameter and this
 * function can be called by driver to get the instance. This also makes
 * sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Call intel_scu_ipc_dev_put() to release the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
	guard(mutex)(&ipclock);

	if (ipcdev) {
		get_device(&ipcdev->dev);
		/*
		 * Prevent the IPC provider from being unloaded while it
		 * is being used.
		 */
		if (try_module_get(ipcdev->owner))
			return ipcdev;

		put_device(&ipcdev->dev);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);

/**
 * intel_scu_ipc_dev_put() - Put SCU IPC instance
 * @scu: SCU IPC instance
 *
 * This function releases the SCU IPC instance retrieved from
 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
 * unloaded.
 */
void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
{
	if (scu) {
		module_put(scu->owner);
		put_device(&scu->dev);
	}
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
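
/*
 * Usage sketch (hypothetical consumer, not part of this file): grab the IPC
 * instance, perform an access and drop the reference again. The register
 * address 0x1ff is illustrative.
 *
 *	struct intel_scu_ipc_dev *scu;
 *	u8 val;
 *	int ret;
 *
 *	scu = intel_scu_ipc_dev_get();
 *	if (!scu)
 *		return -EPROBE_DEFER;
 *	ret = intel_scu_ipc_dev_ioread8(scu, 0x1ff, &val);
 *	intel_scu_ipc_dev_put(scu);
 *	return ret;
 */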

struct intel_scu_ipc_devres {
	struct intel_scu_ipc_dev *scu;
};

static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
{
	struct intel_scu_ipc_devres *dr = res;
	struct intel_scu_ipc_dev *scu = dr->scu;

	intel_scu_ipc_dev_put(scu);
}

/**
 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
 * @dev: Device requesting the SCU IPC device
 *
 * The recommended new API takes SCU IPC instance as parameter and this
 * function can be called by driver to get the instance. This also makes
 * sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
{
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	scu = intel_scu_ipc_dev_get();
	if (!scu) {
		devres_free(dr);
		return NULL;
	}

	dr->scu = scu;
	devres_add(dev, dr);

	return scu;
}
EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);

/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
	reinit_completion(&scu->cmd_complete);
	writel(cmd | IPC_IOC, scu->ipc_base);
}

/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}

/*
 * Status Register (Read Only):
 * Driver will read this register to get the ready/busy status of the IPC
 * block and error status of the IPC command that was just processed by SCU
 * Format:
 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
 */
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
	return __raw_readl(scu->ipc_base + IPC_STATUS);
}

/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}

/* Wait until the SCU is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
	u8 status;
	int err;

	err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
				 100, jiffies_to_usecs(IPC_TIMEOUT));
	if (err)
		return err;

	return (status & IPC_STATUS_ERR) ? -EIO : 0;
}

/* Wait until the IPC IOC interrupt is received or time out after IPC_TIMEOUT (10 s) */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
	int status;

	wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);

	status = ipc_read_status(scu);
	if (status & IPC_STATUS_BUSY)
		return -ETIMEDOUT;

	if (status & IPC_STATUS_ERR)
		return -EIO;

	return 0;
}

static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
	return scu->data.irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}

static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
{
	u8 status;

	if (!scu)
		scu = ipcdev;
	if (!scu)
		return ERR_PTR(-ENODEV);

	status = ipc_read_status(scu);
	if (status & IPC_STATUS_BUSY) {
		dev_dbg(&scu->dev, "device is busy\n");
		return ERR_PTR(-EBUSY);
	}

	return scu;
}

/* Read/Write power control (PMIC in Langwell, MSIC in Penwell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			u32 count, u32 op, u32 id)
{
	int nc;
	u32 offset = 0;
	int err;
	u8 cbuf[IPC_WWBUF_SIZE];
	u32 *wbuf = (u32 *)&cbuf;

	memset(cbuf, 0, sizeof(cbuf));

	guard(mutex)(&ipclock);

	scu = intel_scu_ipc_get(scu);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	for (nc = 0; nc < count; nc++, offset += 2) {
		cbuf[offset] = addr[nc];
		cbuf[offset + 1] = addr[nc] >> 8;
	}

	if (id == IPC_CMD_PCNTRL_R) {
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_W) {
		for (nc = 0; nc < count; nc++, offset += 1)
			cbuf[offset] = data[nc];
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_M) {
		cbuf[offset] = data[0];
		cbuf[offset + 1] = data[1];
		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
	}

	err = intel_scu_ipc_check_status(scu);
	if (err)
		return err;

	/* Read rbuf */
	for (nc = 0, offset = 0; nc < 4; nc++, offset += 4)
		wbuf[nc] = ipc_data_readl(scu, offset);
	memcpy(data, wbuf, count);

	return 0;
}

/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);

/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);

/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			    size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);

/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			     size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
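
/*
 * Usage sketch (hypothetical caller): read three consecutive PMIC registers
 * in one IPC transaction. The addresses are illustrative; as noted above the
 * hardware allows at most 5 registers per call.
 *
 *	u16 regs[3] = { 0x1f0, 0x1f1, 0x1f2 };
 *	u8 vals[3];
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_readv(scu, regs, vals, ARRAY_SIZE(regs));
 *	if (ret)
 *		return ret;
 */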

/**
 * intel_scu_ipc_dev_update() - Update a register
 * @scu: Optional SCU IPC instance
 * @addr: Register address
 * @data: Bits to update
 * @mask: Mask of bits to update
 *
 * Read-modify-write a power control unit register. @data holds the new
 * register value and @mask is a bitmap that indicates which bits to update:
 * %0 = masked, don't modify this bit; %1 = modify this bit. Returns %0 on
 * success or an error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
			     u8 mask)
{
	u8 tmp[2] = { data, mask };
	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);

/**
 * intel_scu_ipc_dev_simple_command() - Send a simple command
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 *
 * Issue a simple command to the SCU. Do not use this interface if you must
 * then access data as any data values may be overwritten by another SCU
 * access by the time this function returns.
 *
 * This function may sleep. Locking for SCU accesses is handled for the
 * caller.
 */
int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
				     int sub)
{
	u32 cmdval;
	int err;

	guard(mutex)(&ipclock);

	scu = intel_scu_ipc_get(scu);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	cmdval = sub << 12 | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);
	if (err)
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
	return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);

/**
 * intel_scu_ipc_dev_command_with_size() - Command with data
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 * @in: Input data
 * @inlen: Input length in bytes
 * @size: Input size written to the IPC command register in whatever
 *	  units (dword, byte) the particular firmware requires. Normally
 *	  should be the same as @inlen.
 * @out: Output data
 * @outlen: Output length in bytes
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 */
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
					int sub, const void *in, size_t inlen,
					size_t size, void *out, size_t outlen)
{
	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
	u32 cmdval, inbuf[4] = {}, outbuf[4] = {};
	int i, err;

	if (inbuflen > 4 || outbuflen > 4)
		return -EINVAL;

	guard(mutex)(&ipclock);

	scu = intel_scu_ipc_get(scu);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	memcpy(inbuf, in, inlen);
	for (i = 0; i < inbuflen; i++)
		ipc_data_writel(scu, inbuf[i], 4 * i);

	cmdval = (size << 16) | (sub << 12) | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);
	if (err) {
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
		return err;
	}

	for (i = 0; i < outbuflen; i++)
		outbuf[i] = ipc_data_readl(scu, 4 * i);

	memcpy(out, outbuf, outlen);

	return 0;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
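
/*
 * Usage sketch (hypothetical caller): issue a command that takes one dword of
 * input and returns one dword of output, with the size field expressed in
 * dwords. The cmd/sub values and the dword-sized @size are illustrative
 * assumptions, not requirements of any particular firmware.
 *
 *	u32 in = 0x1, out;
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_command_with_size(scu, cmd, sub, &in,
 *						  sizeof(in), 1,
 *						  &out, sizeof(out));
 *	if (ret)
 *		return ret;
 */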

/*
 * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is set.
 * In that case the caller must wait for this interrupt handler to be invoked,
 * which in turn completes the caller's wait.
 *
 * This is edge triggered, so no action is needed to clear anything.
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
	struct intel_scu_ipc_dev *scu = dev_id;
	int status = ipc_read_status(scu);

	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}

static void intel_scu_ipc_release(struct device *dev)
{
	struct intel_scu_ipc_dev *scu = container_of(dev, struct intel_scu_ipc_dev, dev);
	struct intel_scu_ipc_data *data = &scu->data;

	if (data->irq > 0)
		free_irq(data->irq, scu);
	iounmap(scu->ipc_base);
	release_mem_region(data->mem.start, resource_size(&data->mem));
	kfree(scu);
}

/**
 * __intel_scu_ipc_register() - Register SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register SCU IPC mechanism under @parent.
 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
 * failure. The caller may use the returned instance if it needs to do
 * SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
			 const struct intel_scu_ipc_data *scu_data,
			 struct module *owner)
{
	int err;
	struct intel_scu_ipc_data *data;
	struct intel_scu_ipc_dev *scu;
	void __iomem *ipc_base;

	guard(mutex)(&ipclock);

	/* We support only one IPC */
	if (ipcdev)
		return ERR_PTR(-EBUSY);

	scu = kzalloc(sizeof(*scu), GFP_KERNEL);
	if (!scu)
		return ERR_PTR(-ENOMEM);

	scu->owner = owner;
	scu->dev.parent = parent;
	scu->dev.class = &intel_scu_ipc_class;
	scu->dev.release = intel_scu_ipc_release;

	memcpy(&scu->data, scu_data, sizeof(scu->data));
	data = &scu->data;

	if (!request_mem_region(data->mem.start, resource_size(&data->mem), "intel_scu_ipc")) {
		err = -EBUSY;
		goto err_free;
	}

	ipc_base = ioremap(data->mem.start, resource_size(&data->mem));
	if (!ipc_base) {
		err = -ENOMEM;
		goto err_release;
	}

	scu->ipc_base = ipc_base;
	init_completion(&scu->cmd_complete);

	if (data->irq > 0) {
		err = request_irq(data->irq, ioc, 0, "intel_scu_ipc", scu);
		if (err)
			goto err_unmap;
	}

	/*
	 * After this point intel_scu_ipc_release() takes care of
	 * releasing the SCU IPC resources once refcount drops to zero.
	 */
	dev_set_name(&scu->dev, "intel_scu_ipc");
	err = device_register(&scu->dev);
	if (err) {
		put_device(&scu->dev);
		return ERR_PTR(err);
	}

	/* Assign device at last */
	ipcdev = scu;
	return scu;

err_unmap:
	iounmap(ipc_base);
err_release:
	release_mem_region(data->mem.start, resource_size(&data->mem));
err_free:
	kfree(scu);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);

/**
 * intel_scu_ipc_unregister() - Unregister SCU IPC
 * @scu: SCU IPC handle
 *
 * This unregisters the SCU IPC device and releases the acquired
 * resources once the refcount goes to zero.
 */
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
{
	guard(mutex)(&ipclock);

	if (!WARN_ON(!ipcdev)) {
		ipcdev = NULL;
		device_unregister(&scu->dev);
	}
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);

static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
{
	struct intel_scu_ipc_devres *dr = res;
	struct intel_scu_ipc_dev *scu = dr->scu;

	intel_scu_ipc_unregister(scu);
}

/**
 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register managed SCU IPC mechanism under
 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
 * case of failure. The caller may use the returned instance if it needs
 * to do SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
			      const struct intel_scu_ipc_data *scu_data,
			      struct module *owner)
{
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	scu = __intel_scu_ipc_register(parent, scu_data, owner);
	if (IS_ERR(scu)) {
		devres_free(dr);
		return scu;
	}

	dr->scu = scu;
	devres_add(parent, dr);

	return scu;
}
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
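
/*
 * Usage sketch (hypothetical provider, e.g. the probe path of a PCI driver):
 * describe the IPC register window and interrupt and register a managed SCU
 * IPC instance. The memory resource values are illustrative only.
 *
 *	struct intel_scu_ipc_data scu_data = {};
 *	struct intel_scu_ipc_dev *scu;
 *
 *	scu_data.mem = DEFINE_RES_MEM(0xff11c000, 0x100);
 *	scu_data.irq = pdev->irq;
 *
 *	scu = __devm_intel_scu_ipc_register(&pdev->dev, &scu_data, THIS_MODULE);
 *	if (IS_ERR(scu))
 *		return PTR_ERR(scu);
 */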

static int __init intel_scu_ipc_init(void)
{
	return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);

static void __exit intel_scu_ipc_exit(void)
{
	class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);