// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_POLL_TIMEOUT		(2 * HZ)
#define VSC_TP_WAIT_FW_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* extra bytes clocked on each SPI transfer to give the firmware time to respond */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)

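/*
 * On-wire packet layout: a one-byte sync marker (VSC_TP_PACKET_SYNC),
 * one command byte, little-endian payload length and sequence number,
 * then the payload, terminated by a CRC32 over header and payload.
 */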
struct vsc_tp_packet {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
	/* no __counted_by(len): the trailing CRC32 is stored past len bytes */
	__u8 buf[];
};

struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* bind with mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffer */
	void *tx_buf;
	void *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;

	/* used to protect command download */
	struct mutex mutex;
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw_gpio = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw_gpio, 1 },
	{}
};

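/*
 * The hard IRQ handler only counts the firmware's wakeuphost assertion and
 * wakes any transfer waiting in vsc_tp_wakeup_request(); event delivery to
 * the registered callback happens in the threaded handler below.
 */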
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}

/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

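/* one full-duplex SPI transaction: clock out obuf while capturing ibuf */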
static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

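/*
 * Clock the request out repeatedly while scanning the received bytes for the
 * sync marker, then reassemble the ack header, payload and trailing CRC,
 * which may arrive split across up to VSC_TP_MAX_XFER_COUNT transfers.
 */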
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				offset += cpy_len;

				if (src_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

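	/* retries exhausted before a complete packet was consumed */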
	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of rx buffer
 * Return: the length of received data in case of success,
 *	otherwise a negative error code
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

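	/* fill the packet header and copy in the payload */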
	pkt->sync = VSC_TP_PACKET_SYNC;
	pkt->cmd = cmd;
	pkt->len = cpu_to_le16(olen);
	pkt->seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

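	/* bit-inverted CRC32 over header plus payload, appended after the payload */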
	crc = ~crc32(~0, (u8 *)pkt, sizeof(*pkt) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, "VSC_TP");

/**
 * vsc_tp_rom_xfer - transfer data to ROM code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array(tp->tx_buf, obuf, words);

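	/* wait for wakeuphost to read low, i.e. the ROM is ready for a transfer */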
	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len);
	if (ret)
		return ret;

	if (ibuf)
		be32_to_cpu_array(ibuf, tp->rx_buf, words);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_rom_xfer, "VSC_TP");

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set default host wakeup pin to non-active
	 * to avoid unexpected host irq interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, "VSC_TP");

/**
 * vsc_tp_need_read - check if device has data to send
 * @tp: vsc_tp device handle
 * Return: true if device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
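	/* readable only if an assertion was latched and both wakeup pins read high */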
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, "VSC_TP");

/**
 * vsc_tp_register_event_cb - register a callback function to receive event
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of event callback
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, "VSC_TP");

/**
 * vsc_tp_request_irq - request irq for vsc_tp device
 * @tp: vsc_tp device handle
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_request_irq(struct vsc_tp *tp)
{
	struct spi_device *spi = tp->spi;
	struct device *dev = &spi->dev;
	int ret;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, "VSC_TP");

/**
 * vsc_tp_free_irq - free irq for vsc_tp device
 * @tp: vsc_tp device handle
 */
void vsc_tp_free_irq(struct vsc_tp *tp)
{
	free_irq(tp->spi->irq, tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, "VSC_TP");

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, "VSC_TP");

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, "VSC_TP");

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, "VSC_TP");

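/* acpi_dev_for_each_child() callback: grab the first (and only) child device */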
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

static int vsc_tp_probe(struct spi_device *spi)
{
	struct vsc_tp *tp;
	struct platform_device_info pinfo = {
		.name = "intel_vsc",
		.data = &tp,
		.size_data = sizeof(tp),
		.id = PLATFORM_DEVID_NONE,
	};
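	/*
	 * Child device for the mei framework to bind to; a pointer to this
	 * vsc_tp instance is handed over as platform data.
	 */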
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	mutex_destroy(&tp->mutex);

	free_irq(spi->irq, tp);

	return ret;
}

static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);

	free_irq(spi->irq, tp);
}

static void vsc_tp_shutdown(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);

	vsc_tp_reset(tp);

	free_irq(spi->irq, tp);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.shutdown = vsc_tp_shutdown,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");