  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (c) 2018 MediaTek Inc.
  3
  4/*
  5 * Bluetooth support for MediaTek serial devices
  6 *
  7 * Author: Sean Wang <sean.wang@mediatek.com>
  8 *
  9 */
 10
 11#include <asm/unaligned.h>
 12#include <linux/atomic.h>
 13#include <linux/clk.h>
 14#include <linux/firmware.h>
 15#include <linux/gpio/consumer.h>
 16#include <linux/iopoll.h>
 17#include <linux/kernel.h>
 18#include <linux/module.h>
 19#include <linux/of.h>
 20#include <linux/pinctrl/consumer.h>
 21#include <linux/pm_runtime.h>
 22#include <linux/regulator/consumer.h>
 23#include <linux/serdev.h>
 24#include <linux/skbuff.h>
 25
 26#include <net/bluetooth/bluetooth.h>
 27#include <net/bluetooth/hci_core.h>
 28
 29#include "h4_recv.h"
 30#include "btmtk.h"
 31
 32#define VERSION "0.2"
 33
 34#define MTK_STP_TLR_SIZE	2
 35
 36#define BTMTKUART_TX_STATE_ACTIVE	1
 37#define BTMTKUART_TX_STATE_WAKEUP	2
 38#define BTMTKUART_TX_WAIT_VND_EVT	3
 39#define BTMTKUART_REQUIRED_WAKEUP	4
 40
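/* Set in btmtkuart_data->flags for standalone Bluetooth chips such as
 * MT7663/MT7668 that bring their own power supply, clock and reset lines,
 * as opposed to the Bluetooth block built into the SoC such as MT7622.
 */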
 41#define BTMTKUART_FLAG_STANDALONE_HW	 BIT(0)
 42
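/* MediaTek STP framing header: a fixed 0x80 prefix, a big-endian field
 * whose lower 12 bits carry the payload length, and a checksum byte that
 * the MT7622 does not check.
 */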
 43struct mtk_stp_hdr {
 44	u8	prefix;
 45	__be16	dlen;
 46	u8	cs;
 47} __packed;
 48
 49struct btmtkuart_data {
 50	unsigned int flags;
 51	const char *fwname;
 52};
 53
 54struct btmtkuart_dev {
 55	struct hci_dev *hdev;
 56	struct serdev_device *serdev;
 57
 58	struct clk *clk;
 59	struct clk *osc;
 60	struct regulator *vcc;
 61	struct gpio_desc *reset;
 62	struct gpio_desc *boot;
 63	struct pinctrl *pinctrl;
 64	struct pinctrl_state *pins_runtime;
 65	struct pinctrl_state *pins_boot;
 66	speed_t	desired_speed;
 67	speed_t	curr_speed;
 68
 69	struct work_struct tx_work;
 70	unsigned long tx_state;
 71	struct sk_buff_head txq;
 72
 73	struct sk_buff *rx_skb;
 74	struct sk_buff *evt_skb;
 75
 76	u8	stp_pad[6];
 77	u8	stp_cursor;
 78	u16	stp_dlen;
 79
 80	const struct btmtkuart_data *data;
 81};
 82
 83#define btmtkuart_is_standalone(bdev)	\
 84	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
 85#define btmtkuart_is_builtin_soc(bdev)	\
 86	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
 87
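/* Send a vendor WMT command as HCI vendor command 0xfc6f and synchronously
 * wait for the corresponding vendor event, translating the event payload
 * into a status code for the SEMAPHORE and FUNC_CTRL operations.
 */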
 88static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 89			    struct btmtk_hci_wmt_params *wmt_params)
 90{
 91	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 92	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
 93	u32 hlen, status = BTMTK_WMT_INVALID;
 94	struct btmtk_hci_wmt_evt *wmt_evt;
 95	struct btmtk_hci_wmt_cmd *wc;
 96	struct btmtk_wmt_hdr *hdr;
 97	int err;
 98
 99	/* Send the WMT command and wait until the WMT event returns */
100	hlen = sizeof(*hdr) + wmt_params->dlen;
101	if (hlen > 255) {
102		err = -EINVAL;
103		goto err_free_skb;
104	}
105
106	wc = kzalloc(hlen, GFP_KERNEL);
107	if (!wc) {
108		err = -ENOMEM;
109		goto err_free_skb;
110	}
111
112	hdr = &wc->hdr;
113	hdr->dir = 1;
114	hdr->op = wmt_params->op;
115	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
116	hdr->flag = wmt_params->flag;
117	memcpy(wc->data, wmt_params->data, wmt_params->dlen);
118
119	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
120
121	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
122	if (err < 0) {
123		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
124		goto err_free_wc;
125	}
126
127	/* The vendor specific WMT commands are all answered by a vendor
128	 * specific event and will not have the Command Status or Command
129	 * Complete as with usual HCI command flow control.
130	 *
131	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
132	 * state to be cleared. The driver specific event receive routine
133	 * will clear that state and with that indicate completion of the
134	 * WMT command.
135	 */
136	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
137				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
138	if (err == -EINTR) {
139		bt_dev_err(hdev, "Execution of wmt command interrupted");
140		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
141		goto err_free_wc;
142	}
143
144	if (err) {
145		bt_dev_err(hdev, "Execution of wmt command timed out");
146		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
147		err = -ETIMEDOUT;
148		goto err_free_wc;
149	}
150
 151	/* Parse and handle the returned WMT event */
152	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
153	if (wmt_evt->whdr.op != hdr->op) {
154		bt_dev_err(hdev, "Wrong op received %d expected %d",
155			   wmt_evt->whdr.op, hdr->op);
156		err = -EIO;
157		goto err_free_wc;
158	}
159
160	switch (wmt_evt->whdr.op) {
161	case BTMTK_WMT_SEMAPHORE:
162		if (wmt_evt->whdr.flag == 2)
163			status = BTMTK_WMT_PATCH_UNDONE;
164		else
165			status = BTMTK_WMT_PATCH_DONE;
166		break;
167	case BTMTK_WMT_FUNC_CTRL:
168		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
169		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
170			status = BTMTK_WMT_ON_DONE;
171		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
172			status = BTMTK_WMT_ON_PROGRESS;
173		else
174			status = BTMTK_WMT_ON_UNDONE;
175		break;
176	}
177
178	if (wmt_params->status)
179		*wmt_params->status = status;
180
181err_free_wc:
182	kfree(wc);
183err_free_skb:
184	kfree_skb(bdev->evt_skb);
185	bdev->evt_skb = NULL;
186
187	return err;
188}
189
190static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
191{
192	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
193	struct hci_event_hdr *hdr = (void *)skb->data;
194	int err;
195
 196	/* When someone is waiting for the WMT event, the skb is cloned so
 197	 * that the waiter can process the event from the clone.
 198	 */
199	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
200		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
201		if (!bdev->evt_skb) {
202			err = -ENOMEM;
203			goto err_out;
204		}
205	}
206
207	err = hci_recv_frame(hdev, skb);
208	if (err < 0)
209		goto err_free_skb;
210
211	if (hdr->evt == HCI_EV_WMT) {
212		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
213				       &bdev->tx_state)) {
214			/* Barrier to sync with other CPUs */
215			smp_mb__after_atomic();
216			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
217		}
218	}
219
220	return 0;
221
222err_free_skb:
223	kfree_skb(bdev->evt_skb);
224	bdev->evt_skb = NULL;
225
226err_out:
227	return err;
228}
229
230static const struct h4_recv_pkt mtk_recv_pkts[] = {
231	{ H4_RECV_ACL,      .recv = hci_recv_frame },
232	{ H4_RECV_SCO,      .recv = hci_recv_frame },
233	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
234};
235
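/* TX path: btmtkuart_tx_wakeup() marks the state as active (or requests a
 * wakeup if it already is) and schedules this work, which drains the txq
 * into the serdev, requeueing any partially written skb, until the queue
 * is empty and no further wakeup has been requested.
 */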
236static void btmtkuart_tx_work(struct work_struct *work)
237{
238	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
239						   tx_work);
240	struct serdev_device *serdev = bdev->serdev;
241	struct hci_dev *hdev = bdev->hdev;
242
243	while (1) {
244		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
245
246		while (1) {
247			struct sk_buff *skb = skb_dequeue(&bdev->txq);
248			int len;
249
250			if (!skb)
251				break;
252
253			len = serdev_device_write_buf(serdev, skb->data,
254						      skb->len);
255			hdev->stat.byte_tx += len;
256
257			skb_pull(skb, len);
258			if (skb->len > 0) {
259				skb_queue_head(&bdev->txq, skb);
260				break;
261			}
262
263			switch (hci_skb_pkt_type(skb)) {
264			case HCI_COMMAND_PKT:
265				hdev->stat.cmd_tx++;
266				break;
267			case HCI_ACLDATA_PKT:
268				hdev->stat.acl_tx++;
269				break;
270			case HCI_SCODATA_PKT:
271				hdev->stat.sco_tx++;
272				break;
273			}
274
275			kfree_skb(skb);
276		}
277
278		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
279			break;
280	}
281
282	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
283}
284
285static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
286{
287	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
288		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
289
290	schedule_work(&bdev->tx_work);
291}
292
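/* Strip the STP envelope from the incoming byte stream: collect the six
 * bytes of STP framing into stp_pad, validate the header, and return a
 * pointer to the start of the H:4 payload with its usable length in
 * *sz_h4, or NULL when no payload bytes are available yet.
 */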
293static const unsigned char *
294mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
295	      int *sz_h4)
296{
297	struct mtk_stp_hdr *shdr;
298
 299	/* The cursor is reset once all the STP data has been consumed */
300	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
301		bdev->stp_cursor = 0;
302
 303	/* Fill the pad buffer until the full STP header info is obtained */
304	while (bdev->stp_cursor < 6 && count > 0) {
305		bdev->stp_pad[bdev->stp_cursor] = *data;
306		bdev->stp_cursor++;
307		data++;
308		count--;
309	}
310
 311	/* Retrieve the STP info and perform a sanity check */
312	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
313		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
314		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
315
316		/* Resync STP when unexpected data is being read */
317		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
 318			bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
319				   shdr->prefix, bdev->stp_dlen);
320			bdev->stp_cursor = 2;
321			bdev->stp_dlen = 0;
322		}
323	}
324
 325	/* Quit directly when there's no data left for H4 to process */
326	if (count <= 0)
327		return NULL;
328
 329	/* Translate to how much data H4 can handle so far */
330	*sz_h4 = min_t(int, count, bdev->stp_dlen);
331
332	/* Update the remaining size of STP packet */
333	bdev->stp_dlen -= *sz_h4;
334
335	/* Data points to STP payload which can be handled by H4 */
336	return data;
337}
338
339static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
340{
341	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
342	const unsigned char *p_left = data, *p_h4;
343	int sz_left = count, sz_h4, adv;
344	int err;
345
346	while (sz_left > 0) {
 347		/*  The serial data received from the MT7622 BT controller is
 348		 *  always wrapped with an STP header and trailer.
 349		 *
 350		 *  A full STP packet looks like
 351		 *   -----------------------------------
 352		 *  | STP header  |  H:4   | STP tailer |
 353		 *   -----------------------------------
 354		 *  but it is not guaranteed to carry a complete H:4 packet; an
 355		 *  H:4 packet may be fragmented across several STP packets, so
 356		 *  an STP header and its length don't necessarily delimit a
 357		 *  full H:4 frame. The length recorded in the STP header only
 358		 *  indicates how much data the H:4 engine can consume at the
 359		 *  moment.
 360		 */
361
362		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
363		if (!p_h4)
364			break;
365
366		adv = p_h4 - p_left;
367		sz_left -= adv;
368		p_left += adv;
369
370		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
371					   sz_h4, mtk_recv_pkts,
372					   ARRAY_SIZE(mtk_recv_pkts));
373		if (IS_ERR(bdev->rx_skb)) {
374			err = PTR_ERR(bdev->rx_skb);
375			bt_dev_err(bdev->hdev,
376				   "Frame reassembly failed (%d)", err);
377			bdev->rx_skb = NULL;
378			return;
379		}
380
381		sz_left -= sz_h4;
382		p_left += sz_h4;
383	}
384}
385
386static ssize_t btmtkuart_receive_buf(struct serdev_device *serdev,
387				     const u8 *data, size_t count)
388{
389	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
390
391	btmtkuart_recv(bdev->hdev, data, count);
392
393	bdev->hdev->stat.byte_rx += count;
394
395	return count;
396}
397
398static void btmtkuart_write_wakeup(struct serdev_device *serdev)
399{
400	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
401
402	btmtkuart_tx_wakeup(bdev);
403}
404
405static const struct serdev_device_ops btmtkuart_client_ops = {
406	.receive_buf = btmtkuart_receive_buf,
407	.write_wakeup = btmtkuart_write_wakeup,
408};
409
410static int btmtkuart_open(struct hci_dev *hdev)
411{
412	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
413	struct device *dev;
414	int err;
415
416	err = serdev_device_open(bdev->serdev);
417	if (err) {
418		bt_dev_err(hdev, "Unable to open UART device %s",
419			   dev_name(&bdev->serdev->dev));
420		goto err_open;
421	}
422
423	if (btmtkuart_is_standalone(bdev)) {
424		if (bdev->curr_speed != bdev->desired_speed)
425			err = serdev_device_set_baudrate(bdev->serdev,
426							 115200);
427		else
428			err = serdev_device_set_baudrate(bdev->serdev,
429							 bdev->desired_speed);
430
431		if (err < 0) {
 432			bt_dev_err(hdev, "Unable to set baudrate for UART device %s",
 433				   dev_name(&bdev->serdev->dev));
 434			goto err_serdev_close;
435		}
436
437		serdev_device_set_flow_control(bdev->serdev, false);
438	}
439
440	bdev->stp_cursor = 2;
441	bdev->stp_dlen = 0;
442
443	dev = &bdev->serdev->dev;
444
445	/* Enable the power domain and clock the device requires */
446	pm_runtime_enable(dev);
447	err = pm_runtime_resume_and_get(dev);
448	if (err < 0)
449		goto err_disable_rpm;
450
451	err = clk_prepare_enable(bdev->clk);
452	if (err < 0)
453		goto err_put_rpm;
454
455	return 0;
456
457err_put_rpm:
458	pm_runtime_put_sync(dev);
459err_disable_rpm:
460	pm_runtime_disable(dev);
461err_serdev_close:
462	serdev_device_close(bdev->serdev);
463err_open:
464	return err;
465}
466
467static int btmtkuart_close(struct hci_dev *hdev)
468{
469	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
470	struct device *dev = &bdev->serdev->dev;
471
 472	/* Shut down the clock and power domain the device requires */
473	clk_disable_unprepare(bdev->clk);
474	pm_runtime_put_sync(dev);
475	pm_runtime_disable(dev);
476
477	serdev_device_close(bdev->serdev);
478
479	return 0;
480}
481
482static int btmtkuart_flush(struct hci_dev *hdev)
483{
484	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
485
486	/* Flush any pending characters */
487	serdev_device_write_flush(bdev->serdev);
488	skb_queue_purge(&bdev->txq);
489
490	cancel_work_sync(&bdev->tx_work);
491
492	kfree_skb(bdev->rx_skb);
493	bdev->rx_skb = NULL;
494
495	bdev->stp_cursor = 2;
496	bdev->stp_dlen = 0;
497
498	return 0;
499}
500
501static int btmtkuart_func_query(struct hci_dev *hdev)
502{
503	struct btmtk_hci_wmt_params wmt_params;
504	int status, err;
505	u8 param = 0;
506
507	/* Query whether the function is enabled */
508	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
509	wmt_params.flag = 4;
510	wmt_params.dlen = sizeof(param);
511	wmt_params.data = &param;
512	wmt_params.status = &status;
513
514	err = mtk_hci_wmt_sync(hdev, &wmt_params);
515	if (err < 0) {
516		bt_dev_err(hdev, "Failed to query function status (%d)", err);
517		return err;
518	}
519
520	return status;
521}
522
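/* Switch both the controller and the host UART to bdev->desired_speed:
 * announce the new rate with a WMT HIF command, reprogram the host side,
 * send a dummy 0xff byte so the controller can lock onto the new rate,
 * and finally verify the link with a WMT TEST command.
 */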
523static int btmtkuart_change_baudrate(struct hci_dev *hdev)
524{
525	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
526	struct btmtk_hci_wmt_params wmt_params;
527	__le32 baudrate;
528	u8 param;
529	int err;
530
 531	/* Indicate to the device that the host is ready to change
 532	 * to a new baudrate.
 533	 */
534	baudrate = cpu_to_le32(bdev->desired_speed);
535	wmt_params.op = BTMTK_WMT_HIF;
536	wmt_params.flag = 1;
537	wmt_params.dlen = 4;
538	wmt_params.data = &baudrate;
539	wmt_params.status = NULL;
540
541	err = mtk_hci_wmt_sync(hdev, &wmt_params);
542	if (err < 0) {
 543		bt_dev_err(hdev, "Failed to set device baudrate (%d)", err);
544		return err;
545	}
546
547	err = serdev_device_set_baudrate(bdev->serdev,
548					 bdev->desired_speed);
549	if (err < 0) {
550		bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
551			   err);
552		return err;
553	}
554
555	serdev_device_set_flow_control(bdev->serdev, false);
556
557	/* Send a dummy byte 0xff to activate the new baudrate */
558	param = 0xff;
559	err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param));
560	if (err < 0 || err < sizeof(param))
561		return err;
562
563	serdev_device_wait_until_sent(bdev->serdev, 0);
564
 565	/* Give the device some time to finish changing the baudrate */
566	usleep_range(20000, 22000);
567
568	/* Test the new baudrate */
569	wmt_params.op = BTMTK_WMT_TEST;
570	wmt_params.flag = 7;
571	wmt_params.dlen = 0;
572	wmt_params.data = NULL;
573	wmt_params.status = NULL;
574
575	err = mtk_hci_wmt_sync(hdev, &wmt_params);
576	if (err < 0) {
577		bt_dev_err(hdev, "Failed to test new baudrate (%d)",
578			   err);
579		return err;
580	}
581
582	bdev->curr_speed = bdev->desired_speed;
583
584	return 0;
585}
586
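/* Controller bring-up: wake up the MCUSYS when required, raise the
 * baudrate on standalone parts, download the firmware patch unless the
 * semaphore query reports it is already present, poll until the WMT
 * function leaves the ON_PROGRESS state, enable the Bluetooth function
 * and apply the low power (TCI sleep) settings.
 */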
587static int btmtkuart_setup(struct hci_dev *hdev)
588{
589	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
590	struct btmtk_hci_wmt_params wmt_params;
591	ktime_t calltime, delta, rettime;
592	struct btmtk_tci_sleep tci_sleep;
593	unsigned long long duration;
594	struct sk_buff *skb;
595	int err, status;
596	u8 param = 0x1;
597
598	calltime = ktime_get();
599
 600	/* Waking up the MCUSYS is required for certain devices before we
 601	 * start any setup.
 602	 */
603	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
604		wmt_params.op = BTMTK_WMT_WAKEUP;
605		wmt_params.flag = 3;
606		wmt_params.dlen = 0;
607		wmt_params.data = NULL;
608		wmt_params.status = NULL;
609
610		err = mtk_hci_wmt_sync(hdev, &wmt_params);
611		if (err < 0) {
612			bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
613			return err;
614		}
615
616		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
617	}
618
619	if (btmtkuart_is_standalone(bdev))
620		btmtkuart_change_baudrate(hdev);
621
 622	/* Query whether the firmware has already been downloaded */
623	wmt_params.op = BTMTK_WMT_SEMAPHORE;
624	wmt_params.flag = 1;
625	wmt_params.dlen = 0;
626	wmt_params.data = NULL;
627	wmt_params.status = &status;
628
629	err = mtk_hci_wmt_sync(hdev, &wmt_params);
630	if (err < 0) {
631		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
632		return err;
633	}
634
635	if (status == BTMTK_WMT_PATCH_DONE) {
636		bt_dev_info(hdev, "Firmware already downloaded");
637		goto ignore_setup_fw;
638	}
639
 640	/* Set up the firmware which the device definitely requires */
641	err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync);
642	if (err < 0)
643		return err;
644
645ignore_setup_fw:
646	/* Query whether the device is already enabled */
647	err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
648				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
649				 2000, 5000000);
650	/* -ETIMEDOUT happens */
651	if (err < 0)
652		return err;
653
 654	/* The other errors happen in btmtkuart_func_query */
655	if (status < 0)
656		return status;
657
658	if (status == BTMTK_WMT_ON_DONE) {
659		bt_dev_info(hdev, "function already on");
660		goto ignore_func_on;
661	}
662
663	/* Enable Bluetooth protocol */
664	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
665	wmt_params.flag = 0;
666	wmt_params.dlen = sizeof(param);
667	wmt_params.data = &param;
668	wmt_params.status = NULL;
669
670	err = mtk_hci_wmt_sync(hdev, &wmt_params);
671	if (err < 0) {
672		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
673		return err;
674	}
675
676ignore_func_on:
677	/* Apply the low power environment setup */
678	tci_sleep.mode = 0x5;
679	tci_sleep.duration = cpu_to_le16(0x640);
680	tci_sleep.host_duration = cpu_to_le16(0x640);
681	tci_sleep.host_wakeup_pin = 0;
682	tci_sleep.time_compensation = 0;
683
684	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
685			     HCI_INIT_TIMEOUT);
686	if (IS_ERR(skb)) {
687		err = PTR_ERR(skb);
688		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
689		return err;
690	}
691	kfree_skb(skb);
692
693	rettime = ktime_get();
694	delta = ktime_sub(rettime, calltime);
695	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
696
697	bt_dev_info(hdev, "Device setup in %llu usecs", duration);
698
699	return 0;
700}
701
702static int btmtkuart_shutdown(struct hci_dev *hdev)
703{
704	struct btmtk_hci_wmt_params wmt_params;
705	u8 param = 0x0;
706	int err;
707
708	/* Disable the device */
709	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
710	wmt_params.flag = 0;
711	wmt_params.dlen = sizeof(param);
712	wmt_params.data = &param;
713	wmt_params.status = NULL;
714
715	err = mtk_hci_wmt_sync(hdev, &wmt_params);
716	if (err < 0) {
717		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
718		return err;
719	}
720
721	return 0;
722}
723
724static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
725{
726	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
727	struct mtk_stp_hdr *shdr;
728	int err, dlen, type = 0;
729
730	/* Prepend skb with frame type */
731	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
732
 733	/* Make sure that there is enough room for the STP header and trailer */
734	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
735	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
736		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
737				       GFP_ATOMIC);
738		if (err < 0)
739			return err;
740	}
741
742	/* Add the STP header */
743	dlen = skb->len;
744	shdr = skb_push(skb, sizeof(*shdr));
745	shdr->prefix = 0x80;
746	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
747	shdr->cs = 0;		/* MT7622 doesn't care about checksum value */
748
749	/* Add the STP trailer */
750	skb_put_zero(skb, MTK_STP_TLR_SIZE);
751
752	skb_queue_tail(&bdev->txq, skb);
753
754	btmtkuart_tx_wakeup(bdev);
755	return 0;
756}
757
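/* Collect the resources described in the device tree: a standalone chip
 * uses "current-speed", a "vcc" regulator, an optional "osc" clock,
 * optional "boot"/"reset" GPIOs and the "default"/"runtime" pinctrl
 * states, while the SoC built-in block only needs its "ref" clock.
 */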
758static int btmtkuart_parse_dt(struct serdev_device *serdev)
759{
760	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
761	struct device_node *node = serdev->dev.of_node;
762	u32 speed = 921600;
763	int err;
764
765	if (btmtkuart_is_standalone(bdev)) {
766		of_property_read_u32(node, "current-speed", &speed);
767
768		bdev->desired_speed = speed;
769
770		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
771		if (IS_ERR(bdev->vcc)) {
772			err = PTR_ERR(bdev->vcc);
773			return err;
774		}
775
776		bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
777		if (IS_ERR(bdev->osc)) {
778			err = PTR_ERR(bdev->osc);
779			return err;
780		}
781
782		bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
783						     GPIOD_OUT_LOW);
784		if (IS_ERR(bdev->boot)) {
785			err = PTR_ERR(bdev->boot);
786			return err;
787		}
788
789		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
790		if (IS_ERR(bdev->pinctrl)) {
791			err = PTR_ERR(bdev->pinctrl);
792			return err;
793		}
794
795		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
796						       "default");
797		if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
798			err = PTR_ERR(bdev->pins_boot);
799			dev_err(&serdev->dev,
800				"Should assign RXD to LOW at boot stage\n");
801			return err;
802		}
803
804		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
805							  "runtime");
806		if (IS_ERR(bdev->pins_runtime)) {
807			err = PTR_ERR(bdev->pins_runtime);
808			return err;
809		}
810
811		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
812						      GPIOD_OUT_LOW);
813		if (IS_ERR(bdev->reset)) {
814			err = PTR_ERR(bdev->reset);
815			return err;
816		}
817	} else if (btmtkuart_is_builtin_soc(bdev)) {
818		bdev->clk = devm_clk_get(&serdev->dev, "ref");
819		if (IS_ERR(bdev->clk))
820			return PTR_ERR(bdev->clk);
821	}
822
823	return 0;
824}
825
826static int btmtkuart_probe(struct serdev_device *serdev)
827{
828	struct btmtkuart_dev *bdev;
829	struct hci_dev *hdev;
830	int err;
831
832	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
833	if (!bdev)
834		return -ENOMEM;
835
836	bdev->data = of_device_get_match_data(&serdev->dev);
837	if (!bdev->data)
838		return -ENODEV;
839
840	bdev->serdev = serdev;
841	serdev_device_set_drvdata(serdev, bdev);
842
843	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
844
845	err = btmtkuart_parse_dt(serdev);
846	if (err < 0)
847		return err;
848
849	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
850	skb_queue_head_init(&bdev->txq);
851
852	/* Initialize and register HCI device */
853	hdev = hci_alloc_dev();
854	if (!hdev) {
855		dev_err(&serdev->dev, "Can't allocate HCI device\n");
856		return -ENOMEM;
857	}
858
859	bdev->hdev = hdev;
860
861	hdev->bus = HCI_UART;
862	hci_set_drvdata(hdev, bdev);
863
864	hdev->open     = btmtkuart_open;
865	hdev->close    = btmtkuart_close;
866	hdev->flush    = btmtkuart_flush;
867	hdev->setup    = btmtkuart_setup;
868	hdev->shutdown = btmtkuart_shutdown;
869	hdev->send     = btmtkuart_send_frame;
870	hdev->set_bdaddr = btmtk_set_bdaddr;
871	SET_HCIDEV_DEV(hdev, &serdev->dev);
872
873	hdev->manufacturer = 70;
874	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
875
876	if (btmtkuart_is_standalone(bdev)) {
877		err = clk_prepare_enable(bdev->osc);
878		if (err < 0)
879			goto err_hci_free_dev;
880
881		if (bdev->boot) {
882			gpiod_set_value_cansleep(bdev->boot, 1);
883		} else {
 884			/* Switch to the specific pin state that the boot
 885			 * stage requires.
 886			 */
887			pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
888		}
889
890		/* Power on */
891		err = regulator_enable(bdev->vcc);
892		if (err < 0)
893			goto err_clk_disable_unprepare;
894
 895		/* Reset the device if reset-gpios is available; otherwise a
 896		 * proper reset must be guaranteed by the board-level design.
 897		 */
898		if (bdev->reset) {
899			gpiod_set_value_cansleep(bdev->reset, 1);
900			usleep_range(1000, 2000);
901			gpiod_set_value_cansleep(bdev->reset, 0);
902		}
903
 904		/* Wait some time until the device is ready, then switch to the
 905		 * pin mode the device requires for UART transfers.
 906		 */
907		msleep(50);
908
909		if (bdev->boot)
910			devm_gpiod_put(&serdev->dev, bdev->boot);
911
912		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
913
 914		/* A standalone device doesn't depend on a power domain on the
 915		 * SoC, so mark it as having no runtime PM callbacks.
 916		 */
917		pm_runtime_no_callbacks(&serdev->dev);
918
919		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
920	}
921
922	err = hci_register_dev(hdev);
923	if (err < 0) {
924		dev_err(&serdev->dev, "Can't register HCI device\n");
925		goto err_regulator_disable;
926	}
927
928	return 0;
929
930err_regulator_disable:
931	if (btmtkuart_is_standalone(bdev))
932		regulator_disable(bdev->vcc);
933err_clk_disable_unprepare:
934	if (btmtkuart_is_standalone(bdev))
935		clk_disable_unprepare(bdev->osc);
936err_hci_free_dev:
937	hci_free_dev(hdev);
938
939	return err;
940}
941
942static void btmtkuart_remove(struct serdev_device *serdev)
943{
944	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
945	struct hci_dev *hdev = bdev->hdev;
946
947	if (btmtkuart_is_standalone(bdev)) {
948		regulator_disable(bdev->vcc);
949		clk_disable_unprepare(bdev->osc);
950	}
951
952	hci_unregister_dev(hdev);
953	hci_free_dev(hdev);
954}
955
956static const struct btmtkuart_data mt7622_data __maybe_unused = {
957	.fwname = FIRMWARE_MT7622,
958};
959
960static const struct btmtkuart_data mt7663_data __maybe_unused = {
961	.flags = BTMTKUART_FLAG_STANDALONE_HW,
962	.fwname = FIRMWARE_MT7663,
963};
964
965static const struct btmtkuart_data mt7668_data __maybe_unused = {
966	.flags = BTMTKUART_FLAG_STANDALONE_HW,
967	.fwname = FIRMWARE_MT7668,
968};
969
970#ifdef CONFIG_OF
971static const struct of_device_id mtk_of_match_table[] = {
972	{ .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
973	{ .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
974	{ .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
975	{ }
976};
977MODULE_DEVICE_TABLE(of, mtk_of_match_table);
978#endif
979
980static struct serdev_device_driver btmtkuart_driver = {
981	.probe = btmtkuart_probe,
982	.remove = btmtkuart_remove,
983	.driver = {
984		.name = "btmtkuart",
985		.of_match_table = of_match_ptr(mtk_of_match_table),
986	},
987};
988
989module_serdev_device_driver(btmtkuart_driver);
990
991MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
992MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
993MODULE_VERSION(VERSION);
994MODULE_LICENSE("GPL");