// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT		(2 * HZ)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* spare bytes clocked on each SPI xfer, giving the device time to respond */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)

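/*
 * On the wire, a packet is the 8-byte header below followed by @len
 * payload bytes and a little endian CRC32 over header + payload.  A
 * maximum-size message thus occupies 8 + 2048 + 4 = 2060 bytes, and
 * with the 700 spare bytes a single SPI transfer never exceeds
 * VSC_TP_MAX_XFER_SIZE = 2760 bytes.
 */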
struct vsc_tp_packet {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
	__u8 buf[] __counted_by(len);
};

struct vsc_tp {
	/* SPI device that does the actual data transfers */
	struct spi_device *spi;

	/* platform device bound to the mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffers, VSC_TP_MAX_XFER_SIZE bytes each */
	void *tx_buf;
	void *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;

	/* serializes command download */
	struct mutex mutex;
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};
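
/*
 * The first acpi_gpio_params field is the index into the ACPI _CRS
 * GPIO resources: 0 wakeuphost, 1 wakeuphostint, 2 resetfw, 3 wakeupfw.
 */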

/*
 * Request a firmware wakeup by driving wakeupfw to its active (0)
 * level, then wait until the ISR has seen the firmware assert
 * wakeuphost in response.
 */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt) &&
				 gpiod_get_value_cansleep(tp->wakeuphost),
				 VSC_TP_WAIT_FW_ASSERTED_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

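	/*
	 * Clock the request out while scanning the received bytes for
	 * the reply: find VSC_TP_PACKET_SYNC, then fill the ack header,
	 * the payload and the trailing CRC in turn, re-issuing shorter
	 * xfers until the whole reply has been read back.
	 */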
	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				offset += cpy_len;

				if (src_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of rx buffer
 * Context: can sleep, takes and releases @tp->mutex
 * Return: the length of received data in case of success,
 *	otherwise negative value
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->sync = VSC_TP_PACKET_SYNC;
	pkt->cmd = cmd;
	pkt->len = cpu_to_le16(olen);
	pkt->seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	/* the CRC covers the packet header plus the payload */
	crc = ~crc32(~0, (u8 *)pkt, sizeof(*pkt) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
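
/*
 * Illustrative call sequence only (@cmd, req and req_len are
 * hypothetical, not defined in this file):
 *
 *	u8 rsp[64];
 *	int n = vsc_tp_xfer(tp, cmd, req, req_len, rsp, sizeof(rsp));
 *	if (n < 0)
 *		return n;
 *	// on success, n is the number of response bytes placed in rsp
 */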

/**
 * vsc_tp_rom_xfer - transfer data to rom code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array(tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
	if (ret)
		return ret;

	/* the response words are big endian as well */
	if (ibuf)
		be32_to_cpu_array(ibuf, tp->rx_buf, words);

	return ret;
}

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set default host wakeup pin to non-active
	 * to avoid unexpected host irq interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);

	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);

/**
 * vsc_tp_need_read - check if device has data to send
 * @tp: vsc_tp device handle
 * Return: true if device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);

/**
 * vsc_tp_register_event_cb - register a callback function to receive event
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of event callback
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);

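/*
 * Hard IRQ half: only account the wakeuphost assertion and wake any
 * waiter in vsc_tp_wakeup_request(); the event callback runs from the
 * threaded half below, where it is allowed to sleep.
 */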
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}

static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

static int vsc_tp_probe(struct spi_device *spi)
{
	struct platform_device_info pinfo = { 0 };
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	struct vsc_tp *tp;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
					vsc_tp_thread_isr,
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}
	pinfo.fwnode = acpi_fwnode_handle(adev);

	pinfo.name = "intel_vsc";
	pinfo.data = &tp;
	pinfo.size_data = sizeof(tp);
	pinfo.id = PLATFORM_DEVID_NONE;

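	/*
	 * The child platform device gets a copy of the vsc_tp pointer
	 * as platform data, so its consumer can retrieve this instance
	 * via dev_get_platdata().
	 */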
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	mutex_destroy(&tp->mutex);

	return ret;
}

static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");