v3.1
  1/*
  2 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
  3 *
  4 * (C) Copyright 2008-2010 Intel Corporation
  5 * Author: Sreedhara DS (sreedhara.ds@intel.com)
  6 *
  7 * This program is free software; you can redistribute it and/or
  8 * modify it under the terms of the GNU General Public License
  9 * as published by the Free Software Foundation; version 2
 10 * of the License.
 11 *
 12 * The SCU, running on the ARC processor, communicates with other entities
 13 * on the IA core through an IPC mechanism which in turn handles messaging
 14 * between the IA core and the SCU. The SCU has two IPC channels, IPC-1 and
 15 * IPC-2. IPC-1 is used between IA32 and the SCU, while IPC-2 is used
 16 * between the P-Unit and the SCU. This driver deals with IPC-1 and provides
 17 * an API for power control unit registers (e.g. MSIC) along with other APIs.
 18 */
 
 19#include <linux/delay.h>
 
 20#include <linux/errno.h>
 21#include <linux/init.h>
 22#include <linux/sysdev.h>
 23#include <linux/pm.h>
 24#include <linux/pci.h>
 25#include <linux/interrupt.h>
 26#include <linux/sfi.h>
 27#include <asm/mrst.h>
 28#include <asm/intel_scu_ipc.h>
 29
 30/* IPC defines the following message types */
 31#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
 32#define IPCMSG_BATTERY        0xEF /* Coulomb Counter Accumulator */
 33#define IPCMSG_FW_UPDATE      0xFE /* Firmware update */
 34#define IPCMSG_PCNTRL         0xFF /* Power controller unit read/write */
 35#define IPCMSG_FW_REVISION    0xF4 /* Get firmware revision */
 36
 37/* Command id associated with message IPCMSG_PCNTRL */
 38#define IPC_CMD_PCNTRL_W      0 /* Register write */
 39#define IPC_CMD_PCNTRL_R      1 /* Register read */
 40#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
 41
 42/*
 43 * IPC register summary
 44 *
 45 * IPC register blocks are memory mapped at fixed address of 0xFF11C000
 46 * To read or write information to the SCU, driver writes to IPC-1 memory
 47 * mapped registers (base address 0xFF11C000). The following is the IPC
 48 * mechanism
 49 *
 50 * 1. IA core cDMI interface claims this transaction and converts it to a
 51 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 52 *
 53 * 2. South Complex cDMI block receives this message and writes it to
 54 *    the IPC-1 register block, causing an interrupt to the SCU
 55 *
 56 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 57 *    message handler is called within firmware.
 58 */
 59
 60#define IPC_BASE_ADDR     0xFF11C000	/* IPC1 base register address */
 61#define IPC_MAX_ADDR      0x100		/* Maximum IPC registers */
 62#define IPC_WWBUF_SIZE    20		/* IPC Write buffer Size */
 63#define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
 64#define IPC_I2C_BASE      0xFF12B000	/* I2C control register base address */
 65#define IPC_I2C_MAX_ADDR  0x10		/* Maximum I2C registers */
 66
 67static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
 68static void ipc_remove(struct pci_dev *pdev);
 69
 70struct intel_scu_ipc_dev {
 71	struct pci_dev *pdev;
 72	void __iomem *ipc_base;
 73	void __iomem *i2c_base;
 74};
 75
 76static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
 77
 78static int platform;		/* Platform type */
 
 79
 80/*
 81 * IPC Read Buffer (Read Only):
 82 * 16 byte buffer for receiving data from SCU, if IPC command
 83 * processing results in response data
 84 */
 
 85#define IPC_READ_BUFFER		0x90
 86
 87#define IPC_I2C_CNTRL_ADDR	0
 88#define I2C_DATA_ADDR		0x04
 89
 
 90static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */
 91
 92/*
 93 * Command Register (Write Only):
 94 * A write to this register results in an interrupt to the SCU core processor
 95 * Format:
 96 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 97 */
 98static inline void ipc_command(u32 cmd) /* Send ipc command */
 99{
100	writel(cmd, ipcdev.ipc_base);
 
101}
102
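The command word built by the callers below follows the layout in the comment above (size in bits 23:16, command id in bits 15:12, ioc in bit 8, command in bits 7:0). A minimal sketch of a helper that would make that composition explicit; the helper name is hypothetical and not part of this driver:

static inline u32 scu_cmd_word(u8 size, u8 id, u8 cmd)
{
	/* |rfu2(8)|size(8)|command id(4)|rfu1(3)|ioc(1)|command(8)| */
	return (u32)size << 16 | (u32)(id & 0xf) << 12 | cmd;
}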
103/*
 
104 * IPC Write Buffer (Write Only):
105 * 16-byte buffer for sending data associated with IPC command to
106 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
107 */
108static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
109{
110	writel(data, ipcdev.ipc_base + 0x80 + offset);
111}
112
113/*
114 * Status Register (Read Only):
115 * Driver will read this register to get the ready/busy status of the IPC
116 * block and error status of the IPC command that was just processed by SCU
117 * Format:
118 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
119 */
120
121static inline u8 ipc_read_status(void)
 
122{
123	return __raw_readl(ipcdev.ipc_base + 0x04);
124}
125
126static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */
 
127{
128	return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
129}
130
131static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
 
132{
133	return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
134}
135
136static inline int busy_loop(void) /* Wait till scu status is not busy */
 
137{
138	u32 status = 0;
139	u32 loop_count = 0;
140
141	status = ipc_read_status();
142	while (status & 1) {
143		udelay(1); /* scu processing time is a few microseconds */
144		status = ipc_read_status();
145		loop_count++;
146		/* break if scu doesn't reset busy bit after huge retry */
147		if (loop_count > 100000) {
148			dev_err(&ipcdev.pdev->dev, "IPC timed out");
149			return -ETIMEDOUT;
150		}
151	}
152	if ((status >> 1) & 1)
153		return -EIO;
154
155	return 0;
156}
157
158/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
159static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
 
160{
161	int i, nc, bytes, d;
162	u32 offset = 0;
163	int err;
164	u8 cbuf[IPC_WWBUF_SIZE] = { };
165	u32 *wbuf = (u32 *)&cbuf;
166
167	mutex_lock(&ipclock);
168
169	memset(cbuf, 0, sizeof(cbuf));
170
171	if (ipcdev.pdev == NULL) {
172		mutex_unlock(&ipclock);
173		return -ENODEV;
174	}
175
176	if (platform != MRST_CPU_CHIP_PENWELL) {
177		bytes = 0;
178		d = 0;
179		for (i = 0; i < count; i++) {
180			cbuf[bytes++] = addr[i];
181			cbuf[bytes++] = addr[i] >> 8;
182			if (id != IPC_CMD_PCNTRL_R)
183				cbuf[bytes++] = data[d++];
184			if (id == IPC_CMD_PCNTRL_M)
185				cbuf[bytes++] = data[d++];
186		}
187		for (i = 0; i < bytes; i += 4)
188			ipc_data_writel(wbuf[i/4], i);
189		ipc_command(bytes << 16 |  id << 12 | 0 << 8 | op);
190	} else {
191		for (nc = 0; nc < count; nc++, offset += 2) {
192			cbuf[offset] = addr[nc];
193			cbuf[offset + 1] = addr[nc] >> 8;
194		}
195
196		if (id == IPC_CMD_PCNTRL_R) {
197			for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
198				ipc_data_writel(wbuf[nc], offset);
199			ipc_command((count*2) << 16 |  id << 12 | 0 << 8 | op);
200		} else if (id == IPC_CMD_PCNTRL_W) {
201			for (nc = 0; nc < count; nc++, offset += 1)
202				cbuf[offset] = data[nc];
203			for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
204				ipc_data_writel(wbuf[nc], offset);
205			ipc_command((count*3) << 16 |  id << 12 | 0 << 8 | op);
206		} else if (id == IPC_CMD_PCNTRL_M) {
207			cbuf[offset] = data[0];
208			cbuf[offset + 1] = data[1];
209			ipc_data_writel(wbuf[0], 0); /* Write wbuff */
210			ipc_command(4 << 16 |  id << 12 | 0 << 8 | op);
211		}
212	}
213
214	err = busy_loop();
215	if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
216		/* Workaround: values are read as 0 without memcpy_fromio */
217		memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
218		if (platform != MRST_CPU_CHIP_PENWELL) {
219			for (nc = 0, offset = 2; nc < count; nc++, offset += 3)
220				data[nc] = ipc_data_readb(offset);
221		} else {
222			for (nc = 0; nc < count; nc++)
223				data[nc] = ipc_data_readb(nc);
224		}
225	}
226	mutex_unlock(&ipclock);
227	return err;
228}
229
230/**
 231 *	intel_scu_ipc_ioread8		-	read a byte via the SCU
232 *	@addr: register on SCU
233 *	@data: return pointer for read byte
234 *
235 *	Read a single register. Returns 0 on success or an error code. All
236 *	locking between SCU accesses is handled for the caller.
237 *
238 *	This function may sleep.
239 */
240int intel_scu_ipc_ioread8(u16 addr, u8 *data)
241{
242	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
243}
244EXPORT_SYMBOL(intel_scu_ipc_ioread8);
245
246/**
247 *	intel_scu_ipc_ioread16		-	read a word via the SCU
248 *	@addr: register on SCU
249 *	@data: return pointer for read word
250 *
251 *	Read a register pair. Returns 0 on success or an error code. All
252 *	locking between SCU accesses is handled for the caller.
253 *
254 *	This function may sleep.
255 */
256int intel_scu_ipc_ioread16(u16 addr, u16 *data)
257{
258	u16 x[2] = {addr, addr + 1 };
259	return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
260}
261EXPORT_SYMBOL(intel_scu_ipc_ioread16);
262
263/**
264 *	intel_scu_ipc_ioread32		-	read a dword via the SCU
265 *	@addr: register on SCU
266 *	@data: return pointer for read dword
267 *
268 *	Read four registers. Returns 0 on success or an error code. All
269 *	locking between SCU accesses is handled for the caller.
270 *
271 *	This function may sleep.
272 */
273int intel_scu_ipc_ioread32(u16 addr, u32 *data)
274{
275	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
276	return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
277}
278EXPORT_SYMBOL(intel_scu_ipc_ioread32);
279
280/**
281 *	intel_scu_ipc_iowrite8		-	write a byte via the SCU
282 *	@addr: register on SCU
283 *	@data: byte to write
 
284 *
285 *	Write a single register. Returns 0 on success or an error code. All
286 *	locking between SCU accesses is handled for the caller.
287 *
288 *	This function may sleep.
289 */
290int intel_scu_ipc_iowrite8(u16 addr, u8 data)
291{
292	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
293}
294EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
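A minimal usage sketch for the byte accessors exported above; the register offset 0x10 is a placeholder, not a documented MSIC register:

static int example_set_bit0(void)
{
	u8 val;
	int ret;

	ret = intel_scu_ipc_ioread8(0x10, &val);	/* 0x10: placeholder address */
	if (ret)
		return ret;

	return intel_scu_ipc_iowrite8(0x10, val | 0x01);
}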
295
296/**
297 *	intel_scu_ipc_iowrite16		-	write a word via the SCU
298 *	@addr: register on SCU
299 *	@data: word to write
300 *
301 *	Write two registers. Returns 0 on success or an error code. All
302 *	locking between SCU accesses is handled for the caller.
303 *
304 *	This function may sleep.
305 */
306int intel_scu_ipc_iowrite16(u16 addr, u16 data)
307{
308	u16 x[2] = {addr, addr + 1 };
309	return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
310}
311EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
312
313/**
314 *	intel_scu_ipc_iowrite32		-	write a dword via the SCU
315 *	@addr: register on SCU
316 *	@data: dword to write
317 *
318 *	Write four registers. Returns 0 on success or an error code. All
319 *	locking between SCU accesses is handled for the caller.
320 *
321 *	This function may sleep.
322 */
323int intel_scu_ipc_iowrite32(u16 addr, u32 data)
 
324{
325	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
326	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
327}
328EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
329
330/**
 331 *	intel_scu_ipc_readv		-	read a set of registers
332 *	@addr: register list
333 *	@data: bytes to return
334 *	@len: length of array
 
335 *
336 *	Read registers. Returns 0 on success or an error code. All
337 *	locking between SCU accesses is handled for the caller.
338 *
339 *	The largest array length permitted by the hardware is 5 items.
340 *
341 *	This function may sleep.
342 */
343int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
 
344{
345	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
346}
347EXPORT_SYMBOL(intel_scu_ipc_readv);
348
349/**
350 *	intel_scu_ipc_writev		-	write a set of registers
351 *	@addr: register list
352 *	@data: bytes to write
353 *	@len: length of array
354 *
355 *	Write registers. Returns 0 on success or an error code. All
356 *	locking between SCU accesses is handled for the caller.
357 *
358 *	The largest array length permitted by the hardware is 5 items.
359 *
 360 *	This function may sleep.
 361 *
 362 */
363int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 
364{
365	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
 
366}
367EXPORT_SYMBOL(intel_scu_ipc_writev);
368
369
370/**
371 *	intel_scu_ipc_update_register	-	r/m/w a register
372 *	@addr: register address
373 *	@bits: bits to update
374 *	@mask: mask of bits to update
375 *
376 *	Read-modify-write power control unit register. The first data argument
 377 *	must be the register value and the second is the mask value. The mask
 378 *	is a bitmap that indicates which bits to update:
 379 *	0 = masked (don't modify this bit), 1 = modify this bit.
380 *	returns 0 on success or an error code.
381 *
382 *	This function may sleep. Locking between SCU accesses is handled
383 *	for the caller.
384 */
385int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
386{
387	u8 data[2] = { bits, mask };
388	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
389}
390EXPORT_SYMBOL(intel_scu_ipc_update_register);
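A sketch of the read-modify-write helper above; only the bits selected by the mask are changed, so the call below sets bit 0 and clears bit 1 while leaving the remaining bits of the (placeholder) register untouched:

static int example_update(void)
{
	/* bits = 0x01, mask = 0x03: bit 0 -> 1, bit 1 -> 0, bits 2-7 unchanged */
	return intel_scu_ipc_update_register(0x10, 0x01, 0x03);
}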
391
392/**
393 *	intel_scu_ipc_simple_command	-	send a simple command
394 *	@cmd: command
395 *	@sub: sub type
396 *
397 *	Issue a simple command to the SCU. Do not use this interface if
398 *	you must then access data as any data values may be overwritten
399 *	by another SCU access by the time this function returns.
400 *
401 *	This function may sleep. Locking for SCU accesses is handled for
402 *	the caller.
403 */
404int intel_scu_ipc_simple_command(int cmd, int sub)
405{
 
406	int err;
407
408	mutex_lock(&ipclock);
409	if (ipcdev.pdev == NULL) {
 
410		mutex_unlock(&ipclock);
411		return -ENODEV;
412	}
413	ipc_command(sub << 12 | cmd);
414	err = busy_loop();
415	mutex_unlock(&ipclock);
416	return err;
417}
418EXPORT_SYMBOL(intel_scu_ipc_simple_command);
419
420/**
421 *	intel_scu_ipc_command	-	command with data
422 *	@cmd: command
423 *	@sub: sub type
424 *	@in: input data
425 *	@inlen: input length in dwords
426 *	@out: output data
 427 *	@outlen: output length in dwords
428 *
429 *	Issue a command to the SCU which involves data transfers. Do the
430 *	data copies under the lock but leave it for the caller to interpret
431 */
432
433int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
434							u32 *out, int outlen)
435{
436	int i, err;
437
438	mutex_lock(&ipclock);
439	if (ipcdev.pdev == NULL) {
440		mutex_unlock(&ipclock);
441		return -ENODEV;
442	}
443
444	for (i = 0; i < inlen; i++)
445		ipc_data_writel(*in++, 4 * i);
446
447	ipc_command((inlen << 16) | (sub << 12) | cmd);
448	err = busy_loop();
449
450	for (i = 0; i < outlen; i++)
451		*out++ = ipc_data_readl(4 * i);
452
453	mutex_unlock(&ipclock);
454	return err;
455}
456EXPORT_SYMBOL(intel_scu_ipc_command);
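A sketch of a data-carrying command using the IPCMSG_FW_REVISION message defined at the top of this file; it assumes the firmware returns its revision in the first output dword, which should be checked against the platform documentation:

static int example_fw_revision(u32 *revision)
{
	/* No input payload; read back one dword of response data. */
	return intel_scu_ipc_command(IPCMSG_FW_REVISION, 0, NULL, 0, revision, 1);
}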
457
458/*I2C commands */
459#define IPC_I2C_WRITE 1 /* I2C Write command */
460#define IPC_I2C_READ  2 /* I2C Read command */
461
462/**
463 *	intel_scu_ipc_i2c_cntrl		-	I2C read/write operations
464 *	@addr: I2C address + command bits
465 *	@data: data to read/write
466 *
 467 *	Perform an I2C read/write operation via the SCU. All locking is
468 *	handled for the caller. This function may sleep.
469 *
470 *	Returns an error code or 0 on success.
471 *
472 *	This has to be in the IPC driver for the locking.
473 */
474int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
475{
476	u32 cmd = 0;
 
477
478	mutex_lock(&ipclock);
479	if (ipcdev.pdev == NULL) {
480		mutex_unlock(&ipclock);
481		return -ENODEV;
482	}
483	cmd = (addr >> 24) & 0xFF;
484	if (cmd == IPC_I2C_READ) {
485		writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
486		/* Write not getting updated without delay */
487		mdelay(1);
488		*data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
489	} else if (cmd == IPC_I2C_WRITE) {
490		writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
491		mdelay(1);
492		writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
493	} else {
494		dev_err(&ipcdev.pdev->dev,
495			"intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
496
497		mutex_unlock(&ipclock);
498		return -EIO;
499	}
500	mutex_unlock(&ipclock);
501	return 0;
502}
503EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
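The control word passed to intel_scu_ipc_i2c_cntrl() carries the operation in bits 31:24 (IPC_I2C_READ or IPC_I2C_WRITE, defined just above); the layout of the lower 24 bits is hardware specific and is treated as an opaque value prepared by the caller in this sketch:

static int example_i2c_read(u32 i2c_ctl, u32 *data)
{
	/* i2c_ctl: hardware-specific addressing bits, assumed to fit in 23:0 */
	return intel_scu_ipc_i2c_cntrl((IPC_I2C_READ << 24) | (i2c_ctl & 0x00ffffff), data);
}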
504
505#define IPC_FW_LOAD_ADDR 0xFFFC0000 /* Storage location for FW image */
506#define IPC_FW_UPDATE_MBOX_ADDR 0xFFFFDFF4 /* Mailbox between ipc and scu */
507#define IPC_MAX_FW_SIZE 262144 /* 256K storage size for loading the FW image */
508#define IPC_FW_MIP_HEADER_SIZE 2048 /* Firmware MIP header size */
509/* IPC inform SCU to get ready for update process */
510#define IPC_CMD_FW_UPDATE_READY  0x10FE
511/* IPC inform SCU to go for update process */
512#define IPC_CMD_FW_UPDATE_GO     0x20FE
513/* Status code for fw update */
514#define IPC_FW_UPDATE_SUCCESS	0x444f4e45 /* Status code 'DONE' */
515#define IPC_FW_UPDATE_BADN	0x4241444E /* Status code 'BADN' */
516#define IPC_FW_TXHIGH		0x54784849 /* Status code 'IPC_FW_TXHIGH' */
517#define IPC_FW_TXLOW		0x54784c4f /* Status code 'IPC_FW_TXLOW' */
518
519struct fw_update_mailbox {
520	u32    status;
521	u32    scu_flag;
522	u32    driver_flag;
523};
524
525
526/**
527 *	intel_scu_ipc_fw_update	-	 Firmware update utility
528 *	@buffer: firmware buffer
529 *	@length: size of firmware buffer
530 *
531 *	This function provides an interface to load the firmware into
 532 *	the SCU. Returns 0 on success or a negative error code on failure.
533 */
534int intel_scu_ipc_fw_update(u8 *buffer, u32 length)
535{
536	void __iomem *fw_update_base;
537	struct fw_update_mailbox __iomem *mailbox = NULL;
538	int retry_cnt = 0;
539	u32 status;
540
541	mutex_lock(&ipclock);
542	fw_update_base = ioremap_nocache(IPC_FW_LOAD_ADDR, (128*1024));
543	if (fw_update_base == NULL) {
544		mutex_unlock(&ipclock);
545		return -ENOMEM;
546	}
547	mailbox = ioremap_nocache(IPC_FW_UPDATE_MBOX_ADDR,
548					sizeof(struct fw_update_mailbox));
549	if (mailbox == NULL) {
550		iounmap(fw_update_base);
551		mutex_unlock(&ipclock);
552		return -ENOMEM;
553	}
554
555	ipc_command(IPC_CMD_FW_UPDATE_READY);
556
557	/* Initialize mailbox */
558	writel(0, &mailbox->status);
559	writel(0, &mailbox->scu_flag);
560	writel(0, &mailbox->driver_flag);
561
562	/* Driver copies the 2KB MIP header to SRAM at 0xFFFC0000*/
563	memcpy_toio(fw_update_base, buffer, 0x800);
564
565	/* Driver sends "FW Update" IPC command (CMD_ID 0xFE; MSG_ID 0x02).
566	* Upon receiving this command, SCU will write the 2K MIP header
567	* from 0xFFFC0000 into NAND.
568	* SCU will write a status code into the Mailbox, and then set scu_flag.
569	*/
570
571	ipc_command(IPC_CMD_FW_UPDATE_GO);
572
573	/* Driver stalls until scu_flag is set */
574	while (readl(&mailbox->scu_flag) != 1) {
575		rmb();
576		mdelay(1);
577	}
578
579	/* Driver checks Mailbox status.
580	 * If the status is 'BADN', then abort (bad NAND).
581	 * If the status is 'IPC_FW_TXLOW', then continue.
582	 */
583	while (readl(&mailbox->status) != IPC_FW_TXLOW) {
584		rmb();
585		mdelay(10);
586	}
587	mdelay(10);
588
589update_retry:
590	if (retry_cnt > 5)
591		goto update_end;
 
592
593	if (readl(&mailbox->status) != IPC_FW_TXLOW)
594		goto update_end;
595	buffer = buffer + 0x800;
596	memcpy_toio(fw_update_base, buffer, 0x20000);
597	writel(1, &mailbox->driver_flag);
598	while (readl(&mailbox->scu_flag) == 1) {
599		rmb();
600		mdelay(1);
601	}
602
603	/* check for 'BADN' */
604	if (readl(&mailbox->status) == IPC_FW_UPDATE_BADN)
605		goto update_end;
606
607	while (readl(&mailbox->status) != IPC_FW_TXHIGH) {
608		rmb();
609		mdelay(10);
610	}
611	mdelay(10);
612
613	if (readl(&mailbox->status) != IPC_FW_TXHIGH)
614		goto update_end;
615
616	buffer = buffer + 0x20000;
617	memcpy_toio(fw_update_base, buffer, 0x20000);
618	writel(0, &mailbox->driver_flag);
 
619
620	while (mailbox->scu_flag == 0) {
621		rmb();
622		mdelay(1);
 
623	}
624
625	/* check for 'BADN' */
626	if (readl(&mailbox->status) == IPC_FW_UPDATE_BADN)
627		goto update_end;
628
629	if (readl(&mailbox->status) == IPC_FW_TXLOW) {
630		++retry_cnt;
631		goto update_retry;
632	}
633
634update_end:
635	status = readl(&mailbox->status);
636
637	iounmap(fw_update_base);
638	iounmap(mailbox);
639	mutex_unlock(&ipclock);
640
641	if (status == IPC_FW_UPDATE_SUCCESS)
642		return 0;
643	return -EIO;
644}
645EXPORT_SYMBOL(intel_scu_ipc_fw_update);
646
647/*
648 * The interrupt handler gets called when the ioc bit of IPC_COMMAND_REG is
649 * set to 1. The caller must then wait for the interrupt handler, which in
650 * turn unlocks the caller. Currently this is not used.
651 *
652 * This is edge triggered, so no action is needed to clear anything.
653 */
654static irqreturn_t ioc(int irq, void *dev_id)
655{
656	return IRQ_HANDLED;
657}
658
659/**
660 *	ipc_probe	-	probe an Intel SCU IPC
661 *	@dev: the PCI device matching
662 *	@id: entry in the match table
 
663 *
664 *	Enable and install an intel SCU IPC. This appears in the PCI space
 665 *	but uses some hard coded addresses as well.
 666 */
667static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
668{
669	int err;
670	resource_size_t pci_resource;
671
672	if (ipcdev.pdev)		/* We support only one SCU */
673		return -EBUSY;
674
675	ipcdev.pdev = pci_dev_get(dev);
676
677	err = pci_enable_device(dev);
678	if (err)
679		return err;
680
681	err = pci_request_regions(dev, "intel_scu_ipc");
682	if (err)
683		return err;
684
685	pci_resource = pci_resource_start(dev, 0);
686	if (!pci_resource)
687		return -ENOMEM;
688
689	if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
690		return -EBUSY;
691
692	ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR);
693	if (!ipcdev.ipc_base)
694		return -ENOMEM;
695
696	ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR);
697	if (!ipcdev.i2c_base) {
698		iounmap(ipcdev.ipc_base);
699		return -ENOMEM;
700	}
701
702	intel_scu_devices_create();
 
703
704	return 0;
705}
706
707/**
708 *	ipc_remove	-	remove a bound IPC device
709 *	@pdev: PCI device
710 *
711 *	In practice the SCU is not removable but this function is also
712 *	called for each device on a module unload or cleanup which is the
713 *	path that will get used.
714 *
715 *	Free up the mappings and release the PCI resources
716 */
717static void ipc_remove(struct pci_dev *pdev)
718{
719	free_irq(pdev->irq, &ipcdev);
720	pci_release_regions(pdev);
721	pci_dev_put(ipcdev.pdev);
722	iounmap(ipcdev.ipc_base);
723	iounmap(ipcdev.i2c_base);
724	ipcdev.pdev = NULL;
725	intel_scu_devices_destroy();
726}
727
728static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
729	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
730	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
731	{ 0,}
732};
733MODULE_DEVICE_TABLE(pci, pci_ids);
734
735static struct pci_driver ipc_driver = {
736	.name = "intel_scu_ipc",
737	.id_table = pci_ids,
738	.probe = ipc_probe,
739	.remove = ipc_remove,
740};
741
742
743static int __init intel_scu_ipc_init(void)
744{
745	platform = mrst_identify_cpu();
746	if (platform == 0)
747		return -ENODEV;
748	return  pci_register_driver(&ipc_driver);
749}
 
750
751static void __exit intel_scu_ipc_exit(void)
752{
753	pci_unregister_driver(&ipc_driver);
754}
755
756MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
757MODULE_DESCRIPTION("Intel SCU IPC driver");
758MODULE_LICENSE("GPL");
759
760module_init(intel_scu_ipc_init);
761module_exit(intel_scu_ipc_exit);
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for the Intel SCU IPC mechanism
  4 *
  5 * (C) Copyright 2008-2010,2015 Intel Corporation
  6 * Author: Sreedhara DS (sreedhara.ds@intel.com)
  7 *
  8 * The SCU, running on the ARC processor, communicates with other entities
  9 * on the IA core through an IPC mechanism which in turn handles messaging
 10 * between the IA core and the SCU. The SCU has two IPC channels, IPC-1 and
 11 * IPC-2. IPC-1 is used between IA32 and the SCU, while IPC-2 is used
 12 * between the P-Unit and the SCU. This driver deals with IPC-1 and provides
 13 * an API for power control unit registers (e.g. MSIC) along with other APIs.
 14 */
 15
 16#include <linux/delay.h>
 17#include <linux/device.h>
 18#include <linux/errno.h>
 19#include <linux/init.h>
 20#include <linux/interrupt.h>
 21#include <linux/io.h>
 22#include <linux/iopoll.h>
 23#include <linux/module.h>
 24#include <linux/slab.h>
 25
 26#include <asm/intel_scu_ipc.h>
 27
 28/* IPC defines the following message types */
 29#define IPCMSG_PCNTRL         0xff /* Power controller unit read/write */
 30
 31/* Command id associated with message IPCMSG_PCNTRL */
 32#define IPC_CMD_PCNTRL_W      0 /* Register write */
 33#define IPC_CMD_PCNTRL_R      1 /* Register read */
 34#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
 35
 36/*
 37 * IPC register summary
 38 *
 39 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
 40 * To read or write information to the SCU, driver writes to IPC-1 memory
 41 * mapped registers. The following is the IPC mechanism
 
 42 *
 43 * 1. IA core cDMI interface claims this transaction and converts it to a
 44 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 45 *
 46 * 2. South Complex cDMI block receives this message and writes it to
 47 *    the IPC-1 register block, causing an interrupt to the SCU
 48 *
 49 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 50 *    message handler is called within firmware.
 51 */
 52
 53#define IPC_WWBUF_SIZE    20		/* IPC Write buffer Size */
 54#define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
 55#define IPC_IOC	          0x100		/* IPC command register IOC bit */
 56
 57struct intel_scu_ipc_dev {
 58	struct device dev;
 59	struct resource mem;
 60	struct module *owner;
 61	int irq;
 62	void __iomem *ipc_base;
 63	struct completion cmd_complete;
 64};
 65
 66#define IPC_STATUS		0x04
 67#define IPC_STATUS_IRQ		BIT(2)
 68#define IPC_STATUS_ERR		BIT(1)
 69#define IPC_STATUS_BUSY		BIT(0)
 70
 71/*
 72 * IPC Write/Read Buffers:
 73 * 16-byte buffers for sending data to and receiving data from the SCU.
 74 */
 75#define IPC_WRITE_BUFFER	0x80
 76#define IPC_READ_BUFFER		0x90
 77
 78/* Timeout in jiffies */
 79#define IPC_TIMEOUT		(10 * HZ)
 80
 81static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
 82static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */
 83
 84static struct class intel_scu_ipc_class = {
 85	.name = "intel_scu_ipc",
 86};
 87
 88/**
 89 * intel_scu_ipc_dev_get() - Get SCU IPC instance
 90 *
 91 * The recommended new API takes SCU IPC instance as parameter and this
 92 * function can be called by driver to get the instance. This also makes
 93 * sure the driver providing the IPC functionality cannot be unloaded
 94 * while the caller has the instance.
 95 *
 96 * Call intel_scu_ipc_dev_put() to release the instance.
 97 *
 98 * Returns %NULL if SCU IPC is not currently available.
 99 */
100struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
101{
102	struct intel_scu_ipc_dev *scu = NULL;
103
104	mutex_lock(&ipclock);
105	if (ipcdev) {
106		get_device(&ipcdev->dev);
107		/*
108		 * Prevent the IPC provider from being unloaded while it
109		 * is being used.
110		 */
111		if (!try_module_get(ipcdev->owner))
112			put_device(&ipcdev->dev);
113		else
114			scu = ipcdev;
115	}
116
117	mutex_unlock(&ipclock);
118	return scu;
119}
120EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
121
122/**
123 * intel_scu_ipc_dev_put() - Put SCU IPC instance
124 * @scu: SCU IPC instance
125 *
126 * This function releases the SCU IPC instance retrieved from
127 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
128 * unloaded.
129 */
130void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
131{
132	if (scu) {
133		module_put(scu->owner);
134		put_device(&scu->dev);
135	}
136}
137EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
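A sketch of the get/use/put pattern these two helpers establish; intel_scu_ipc_dev_ioread8() is defined later in this file and the 0x10 register offset is a placeholder:

static int example_read_byte(u8 *val)
{
	struct intel_scu_ipc_dev *scu;
	int ret;

	scu = intel_scu_ipc_dev_get();
	if (!scu)
		return -ENODEV;		/* SCU IPC not available */

	ret = intel_scu_ipc_dev_ioread8(scu, 0x10, val);
	intel_scu_ipc_dev_put(scu);
	return ret;
}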
138
139struct intel_scu_ipc_devres {
140	struct intel_scu_ipc_dev *scu;
141};
142
143static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
144{
145	struct intel_scu_ipc_devres *dr = res;
146	struct intel_scu_ipc_dev *scu = dr->scu;
147
148	intel_scu_ipc_dev_put(scu);
149}
150
151/**
152 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
153 * @dev: Device requesting the SCU IPC device
154 *
155 * The recommended new API takes SCU IPC instance as parameter and this
156 * function can be called by driver to get the instance. This also makes
157 * sure the driver providing the IPC functionality cannot be unloaded
158 * while the caller has the instance.
159 *
160 * Returns %NULL if SCU IPC is not currently available.
161 */
162struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
163{
164	struct intel_scu_ipc_devres *dr;
165	struct intel_scu_ipc_dev *scu;
166
167	dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
168	if (!dr)
169		return NULL;
170
171	scu = intel_scu_ipc_dev_get();
172	if (!scu) {
173		devres_free(dr);
174		return NULL;
175	}
176
177	dr->scu = scu;
178	devres_add(dev, dr);
179
180	return scu;
181}
182EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
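A sketch of the managed variant in a consumer's probe path, assuming a platform driver; the reference is dropped automatically when the consuming device is unbound:

static int example_probe(struct platform_device *pdev)
{
	struct intel_scu_ipc_dev *scu;

	scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
	if (!scu)
		return -EPROBE_DEFER;	/* provider not registered yet */

	platform_set_drvdata(pdev, scu);
	return 0;
}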
183
184/*
185 * Send ipc command
186 * Command Register (Write Only):
187 * A write to this register results in an interrupt to the SCU core processor
188 * Format:
189 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
190 */
191static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
192{
193	reinit_completion(&scu->cmd_complete);
194	writel(cmd | IPC_IOC, scu->ipc_base);
195}
196
197/*
198 * Write ipc data
199 * IPC Write Buffer (Write Only):
200 * 16-byte buffer for sending data associated with IPC command to
201 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
202 */
203static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
204{
205	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
206}
207
208/*
209 * Status Register (Read Only):
210 * Driver will read this register to get the ready/busy status of the IPC
211 * block and error status of the IPC command that was just processed by SCU
212 * Format:
213 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
214 */
215static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
216{
217	return __raw_readl(scu->ipc_base + IPC_STATUS);
218}
219
220/* Read ipc byte data */
221static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
222{
223	return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
224}
225
226/* Read ipc u32 data */
227static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
228{
229	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
230}
231
232/* Wait till scu status is not busy */
233static inline int busy_loop(struct intel_scu_ipc_dev *scu)
234{
235	u8 status;
236	int err;
237
238	err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
239				 100, jiffies_to_usecs(IPC_TIMEOUT));
240	if (err)
241		return err;
242
243	return (status & IPC_STATUS_ERR) ? -EIO : 0;
244}
245
246/* Wait till ipc ioc interrupt is received or timeout in 10 seconds */
247static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
248{
249	int status;
 
250
251	wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
252
253	status = ipc_read_status(scu);
254	if (status & IPC_STATUS_BUSY)
255		return -ETIMEDOUT;
256
257	if (status & IPC_STATUS_ERR)
258		return -EIO;
259
260	return 0;
261}
262
263static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
264{
265	return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
266}
267
268static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
269{
270	u8 status;
271
272	if (!scu)
273		scu = ipcdev;
274	if (!scu)
275		return ERR_PTR(-ENODEV);
276
277	status = ipc_read_status(scu);
278	if (status & IPC_STATUS_BUSY) {
279		dev_dbg(&scu->dev, "device is busy\n");
280		return ERR_PTR(-EBUSY);
281	}
282
283	return scu;
284}
285
286/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
287static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
288			u32 count, u32 op, u32 id)
289{
290	int nc;
291	u32 offset = 0;
292	int err;
293	u8 cbuf[IPC_WWBUF_SIZE];
294	u32 *wbuf = (u32 *)&cbuf;
295
296	memset(cbuf, 0, sizeof(cbuf));
297
298	mutex_lock(&ipclock);
299	scu = intel_scu_ipc_get(scu);
300	if (IS_ERR(scu)) {
301		mutex_unlock(&ipclock);
302		return PTR_ERR(scu);
303	}
304
305	for (nc = 0; nc < count; nc++, offset += 2) {
306		cbuf[offset] = addr[nc];
307		cbuf[offset + 1] = addr[nc] >> 8;
308	}
309
310	if (id == IPC_CMD_PCNTRL_R) {
311		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
312			ipc_data_writel(scu, wbuf[nc], offset);
313		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
314	} else if (id == IPC_CMD_PCNTRL_W) {
315		for (nc = 0; nc < count; nc++, offset += 1)
316			cbuf[offset] = data[nc];
317		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
318			ipc_data_writel(scu, wbuf[nc], offset);
319		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
320	} else if (id == IPC_CMD_PCNTRL_M) {
321		cbuf[offset] = data[0];
322		cbuf[offset + 1] = data[1];
323		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
324		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
325	}
326
327	err = intel_scu_ipc_check_status(scu);
328	if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
329		/* Workaround: values are read as 0 without memcpy_fromio */
330		memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
331		for (nc = 0; nc < count; nc++)
332			data[nc] = ipc_data_readb(scu, nc);
333	}
334	mutex_unlock(&ipclock);
335	return err;
336}
337
338/**
339 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
340 * @scu: Optional SCU IPC instance
341 * @addr: Register on SCU
342 * @data: Return pointer for read byte
343 *
344 * Read a single register. Returns %0 on success or an error code. All
345 * locking between SCU accesses is handled for the caller.
346 *
347 * This function may sleep.
348 */
349int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
350{
351	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
 
352}
353EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
354
355/**
356 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
357 * @scu: Optional SCU IPC instance
358 * @addr: Register on SCU
359 * @data: Byte to write
360 *
361 * Write a single register. Returns %0 on success or an error code. All
362 * locking between SCU accesses is handled for the caller.
363 *
364 * This function may sleep.
365 */
366int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
367{
368	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
369}
370EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
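Since @scu is optional, legacy callers may pass NULL and the single registered instance is used. A sketch, with 0x10 again a placeholder register:

static int example_write_default(u8 val)
{
	/* NULL selects the one globally registered SCU IPC instance */
	return intel_scu_ipc_dev_iowrite8(NULL, 0x10, val);
}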
371
372/**
373 * intel_scu_ipc_dev_readv() - Read a set of registers
374 * @scu: Optional SCU IPC instance
375 * @addr: Register list
376 * @data: Bytes to return
377 * @len: Length of array
378 *
379 * Read registers. Returns %0 on success or an error code. All locking
380 * between SCU accesses is handled for the caller.
381 *
382 * The largest array length permitted by the hardware is 5 items.
383 *
384 * This function may sleep.
385 */
386int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
387			    size_t len)
388{
389	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
 
390}
391EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
392
393/**
394 * intel_scu_ipc_dev_writev() - Write a set of registers
395 * @scu: Optional SCU IPC instance
396 * @addr: Register list
397 * @data: Bytes to write
398 * @len: Length of array
399 *
400 * Write registers. Returns %0 on success or an error code. All locking
401 * between SCU accesses is handled for the caller.
402 *
403 * The largest array length permitted by the hardware is 5 items.
404 *
405 * This function may sleep.
406 */
407int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
408			     size_t len)
409{
410	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
411}
412EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
413
414/**
415 * intel_scu_ipc_dev_update() - Update a register
416 * @scu: Optional SCU IPC instance
417 * @addr: Register address
418 * @data: Bits to update
419 * @mask: Mask of bits to update
420 *
421 * Read-modify-write power control unit register. The first data argument
422 * must be the register value and the second is the mask value. The mask is
423 * a bitmap that indicates which bits to update: %0 = masked (don't modify
424 * this bit), %1 = modify this bit. Returns %0 on success or an error code.
425 *
426 * This function may sleep. Locking between SCU accesses is handled
427 * for the caller.
428 */
429int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
430			     u8 mask)
431{
432	u8 tmp[2] = { data, mask };
433	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
434}
435EXPORT_SYMBOL(intel_scu_ipc_dev_update);
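A sketch of the masked update; as with the older intel_scu_ipc_update_register(), only bits set in @mask are written, and 0x10 is a placeholder address:

static int example_dev_update(struct intel_scu_ipc_dev *scu)
{
	/* data = 0x01, mask = 0x03: bit 0 -> 1, bit 1 -> 0, others unchanged */
	return intel_scu_ipc_dev_update(scu, 0x10, 0x01, 0x03);
}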
 
436
437/**
438 * intel_scu_ipc_dev_simple_command() - Send a simple command
439 * @scu: Optional SCU IPC instance
440 * @cmd: Command
441 * @sub: Sub type
442 *
443 * Issue a simple command to the SCU. Do not use this interface if you must
444 * then access data as any data values may be overwritten by another SCU
445 * access by the time this function returns.
446 *
447 * This function may sleep. Locking for SCU accesses is handled for the
448 * caller.
449 */
450int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
451				     int sub)
452{
453	u32 cmdval;
454	int err;
455
456	mutex_lock(&ipclock);
457	scu = intel_scu_ipc_get(scu);
458	if (IS_ERR(scu)) {
459		mutex_unlock(&ipclock);
460		return PTR_ERR(scu);
461	}
462
463	cmdval = sub << 12 | cmd;
464	ipc_command(scu, cmdval);
465	err = intel_scu_ipc_check_status(scu);
466	mutex_unlock(&ipclock);
467	if (err)
468		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
469	return err;
470}
471EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
472
473/**
474 * intel_scu_ipc_dev_command_with_size() - Command with data
475 * @scu: Optional SCU IPC instance
476 * @cmd: Command
477 * @sub: Sub type
478 * @in: Input data
479 * @inlen: Input length in bytes
480 * @size: Input size written to the IPC command register in whatever
481 *	  units (dword, byte) the particular firmware requires. Normally
482 *	  should be the same as @inlen.
483 * @out: Output data
484 * @outlen: Output length in bytes
485 *
486 * Issue a command to the SCU which involves data transfers. Do the
487 * data copies under the lock but leave it for the caller to interpret.
488 */
489int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
490					int sub, const void *in, size_t inlen,
491					size_t size, void *out, size_t outlen)
492{
493	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
494	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
495	u32 cmdval, inbuf[4] = {};
496	int i, err;
497
498	if (inbuflen > 4 || outbuflen > 4)
499		return -EINVAL;
500
501	mutex_lock(&ipclock);
502	scu = intel_scu_ipc_get(scu);
503	if (IS_ERR(scu)) {
504		mutex_unlock(&ipclock);
505		return PTR_ERR(scu);
506	}
507
508	memcpy(inbuf, in, inlen);
509	for (i = 0; i < inbuflen; i++)
510		ipc_data_writel(scu, inbuf[i], 4 * i);
511
512	cmdval = (size << 16) | (sub << 12) | cmd;
513	ipc_command(scu, cmdval);
514	err = intel_scu_ipc_check_status(scu);
515
516	if (!err) {
517		u32 outbuf[4] = {};
518
519		for (i = 0; i < outbuflen; i++)
520			outbuf[i] = ipc_data_readl(scu, 4 * i);
521
522		memcpy(out, outbuf, outlen);
523	}
524
525	mutex_unlock(&ipclock);
526	if (err)
527		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
528	return err;
529}
530EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
531
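A sketch of a command that sends and receives a small payload through intel_scu_ipc_dev_command_with_size(); the command number 0x42 and the 8-byte request/4-byte reply layout are invented for illustration only:

static int example_command(struct intel_scu_ipc_dev *scu)
{
	u8 request[8] = { 0 };		/* hypothetical request payload */
	u32 reply;

	return intel_scu_ipc_dev_command_with_size(scu, 0x42, 0,
						   request, sizeof(request),
						   sizeof(request),
						   &reply, sizeof(reply));
}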
532/*
533 * The interrupt handler gets called when the ioc bit of IPC_COMMAND_REG is
534 * set to 1. The caller must then wait for the interrupt handler, which in
535 * turn unlocks the caller. Currently this is not used.
536 *
537 * This is edge triggered, so no action is needed to clear anything.
538 */
539static irqreturn_t ioc(int irq, void *dev_id)
540{
541	struct intel_scu_ipc_dev *scu = dev_id;
542	int status = ipc_read_status(scu);
543
544	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
545	complete(&scu->cmd_complete);
546
547	return IRQ_HANDLED;
548}
 
549
550static void intel_scu_ipc_release(struct device *dev)
551{
552	struct intel_scu_ipc_dev *scu;
553
554	scu = container_of(dev, struct intel_scu_ipc_dev, dev);
555	if (scu->irq > 0)
556		free_irq(scu->irq, scu);
557	iounmap(scu->ipc_base);
558	release_mem_region(scu->mem.start, resource_size(&scu->mem));
559	kfree(scu);
560}
561
562/**
563 * __intel_scu_ipc_register() - Register SCU IPC device
564 * @parent: Parent device
565 * @scu_data: Data used to configure SCU IPC
566 * @owner: Module registering the SCU IPC device
567 *
568 * Call this function to register SCU IPC mechanism under @parent.
569 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
570 * failure. The caller may use the returned instance if it needs to do
571 * SCU IPC calls itself.
572 */
573struct intel_scu_ipc_dev *
574__intel_scu_ipc_register(struct device *parent,
575			 const struct intel_scu_ipc_data *scu_data,
576			 struct module *owner)
577{
578	int err;
579	struct intel_scu_ipc_dev *scu;
580	void __iomem *ipc_base;
581
582	mutex_lock(&ipclock);
583	/* We support only one IPC */
584	if (ipcdev) {
585		err = -EBUSY;
586		goto err_unlock;
587	}
588
589	scu = kzalloc(sizeof(*scu), GFP_KERNEL);
590	if (!scu) {
591		err = -ENOMEM;
592		goto err_unlock;
593	}
 
594
595	scu->owner = owner;
596	scu->dev.parent = parent;
597	scu->dev.class = &intel_scu_ipc_class;
598	scu->dev.release = intel_scu_ipc_release;
599
600	if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
601				"intel_scu_ipc")) {
602		err = -EBUSY;
603		goto err_free;
604	}
605
606	ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
607	if (!ipc_base) {
608		err = -ENOMEM;
609		goto err_release;
610	}
611
612	scu->ipc_base = ipc_base;
613	scu->mem = scu_data->mem;
614	scu->irq = scu_data->irq;
615	init_completion(&scu->cmd_complete);
616
617	if (scu->irq > 0) {
618		err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
619		if (err)
620			goto err_unmap;
621	}
622
623	/*
624	 * After this point intel_scu_ipc_release() takes care of
625	 * releasing the SCU IPC resources once refcount drops to zero.
626	 */
627	dev_set_name(&scu->dev, "intel_scu_ipc");
628	err = device_register(&scu->dev);
629	if (err) {
630		put_device(&scu->dev);
631		goto err_unlock;
632	}
633
634	/* Assign device at last */
635	ipcdev = scu;
636	mutex_unlock(&ipclock);
637
638	return scu;
639
640err_unmap:
641	iounmap(ipc_base);
642err_release:
643	release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
644err_free:
645	kfree(scu);
646err_unlock:
647	mutex_unlock(&ipclock);
648
649	return ERR_PTR(err);
650}
651EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
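A sketch of how a provider (for example a PCI driver) might register the IPC block; the intel_scu_ipc_data fields used here (mem and irq) are the ones this function reads, while the base address, span and interrupt number are placeholders:

static struct intel_scu_ipc_dev *example_register(struct device *parent,
						  resource_size_t base, int irq)
{
	struct intel_scu_ipc_data data = {};

	data.mem.start = base;
	data.mem.end = base + 0x100 - 1;	/* 0x100: assumed register span */
	data.mem.flags = IORESOURCE_MEM;
	data.irq = irq;				/* irq <= 0 would select the polling path */

	return __intel_scu_ipc_register(parent, &data, THIS_MODULE);
}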
652
653/**
654 * intel_scu_ipc_unregister() - Unregister SCU IPC
655 * @scu: SCU IPC handle
 
656 *
657 * This unregisters the SCU IPC device and releases the acquired
658 * resources once the refcount goes to zero.
659 */
660void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
661{
662	mutex_lock(&ipclock);
663	if (!WARN_ON(!ipcdev)) {
664		ipcdev = NULL;
665		device_unregister(&scu->dev);
666	}
667	mutex_unlock(&ipclock);
668}
669EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
670
671static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
672{
673	struct intel_scu_ipc_devres *dr = res;
674	struct intel_scu_ipc_dev *scu = dr->scu;
675
676	intel_scu_ipc_unregister(scu);
677}
678
679/**
680 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
681 * @parent: Parent device
682 * @scu_data: Data used to configure SCU IPC
683 * @owner: Module registering the SCU IPC device
684 *
685 * Call this function to register managed SCU IPC mechanism under
686 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
687 * case of failure. The caller may use the returned instance if it needs
688 * to do SCU IPC calls itself.
689 */
690struct intel_scu_ipc_dev *
691__devm_intel_scu_ipc_register(struct device *parent,
692			      const struct intel_scu_ipc_data *scu_data,
693			      struct module *owner)
694{
695	struct intel_scu_ipc_devres *dr;
696	struct intel_scu_ipc_dev *scu;
697
698	dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
699	if (!dr)
700		return NULL;
701
702	scu = __intel_scu_ipc_register(parent, scu_data, owner);
703	if (IS_ERR(scu)) {
704		devres_free(dr);
705		return scu;
706	}
707
708	dr->scu = scu;
709	devres_add(parent, dr);
710
711	return scu;
712}
713EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
714
715static int __init intel_scu_ipc_init(void)
716{
717	return class_register(&intel_scu_ipc_class);
718}
719subsys_initcall(intel_scu_ipc_init);
720
721static void __exit intel_scu_ipc_exit(void)
722{
723	class_unregister(&intel_scu_ipc_class);
724}
725module_exit(intel_scu_ipc_exit);