v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (c) 2018 MediaTek Inc.
  3
  4/*
  5 * Bluetooth support for MediaTek serial devices
  6 *
  7 * Author: Sean Wang <sean.wang@mediatek.com>
  8 *
  9 */
 10
 11#include <asm/unaligned.h>
 12#include <linux/atomic.h>
 13#include <linux/clk.h>
 14#include <linux/firmware.h>
 15#include <linux/gpio/consumer.h>
 16#include <linux/iopoll.h>
 17#include <linux/kernel.h>
 18#include <linux/module.h>
 19#include <linux/of.h>
 20#include <linux/pinctrl/consumer.h>
 21#include <linux/pm_runtime.h>
 22#include <linux/regulator/consumer.h>
 23#include <linux/serdev.h>
 24#include <linux/skbuff.h>
 25
 26#include <net/bluetooth/bluetooth.h>
 27#include <net/bluetooth/hci_core.h>
 28
 29#include "h4_recv.h"
 30#include "btmtk.h"
 31
 32#define VERSION "0.2"
 33
 34#define MTK_STP_TLR_SIZE	2
 35
 36#define BTMTKUART_TX_STATE_ACTIVE	1
 37#define BTMTKUART_TX_STATE_WAKEUP	2
 38#define BTMTKUART_TX_WAIT_VND_EVT	3
 39#define BTMTKUART_REQUIRED_WAKEUP	4
 40
 41#define BTMTKUART_FLAG_STANDALONE_HW	 BIT(0)
 42
 43struct mtk_stp_hdr {
 44	u8	prefix;
 45	__be16	dlen;
 46	u8	cs;
 47} __packed;
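/*
 * On-wire STP framing assumed by this driver, as encoded by
 * btmtkuart_send_frame() and parsed by mtk_stp_split() below:
 *
 *   prefix (0x80) | dlen (__be16: type in bits 15..12, payload length in
 *   bits 11..0) | cs | H:4 payload | 2-byte trailer (MTK_STP_TLR_SIZE)
 *
 * The checksum byte is sent as 0 since the MT7622 does not check it.
 */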
 48
 49struct btmtkuart_data {
 50	unsigned int flags;
 51	const char *fwname;
 52};
 53
 54struct btmtkuart_dev {
 55	struct hci_dev *hdev;
 56	struct serdev_device *serdev;
 57
 58	struct clk *clk;
 59	struct clk *osc;
 60	struct regulator *vcc;
 61	struct gpio_desc *reset;
 62	struct gpio_desc *boot;
 63	struct pinctrl *pinctrl;
 64	struct pinctrl_state *pins_runtime;
 65	struct pinctrl_state *pins_boot;
 66	speed_t	desired_speed;
 67	speed_t	curr_speed;
 68
 69	struct work_struct tx_work;
 70	unsigned long tx_state;
 71	struct sk_buff_head txq;
 72
 73	struct sk_buff *rx_skb;
 74	struct sk_buff *evt_skb;
 75
 76	u8	stp_pad[6];
 77	u8	stp_cursor;
 78	u16	stp_dlen;
 79
 80	const struct btmtkuart_data *data;
 81};
 82
 83#define btmtkuart_is_standalone(bdev)	\
 84	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
 85#define btmtkuart_is_builtin_soc(bdev)	\
 86	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
 87
 88static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 89			    struct btmtk_hci_wmt_params *wmt_params)
 90{
 91	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 92	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
 93	u32 hlen, status = BTMTK_WMT_INVALID;
 94	struct btmtk_hci_wmt_evt *wmt_evt;
 95	struct btmtk_hci_wmt_cmd *wc;
 96	struct btmtk_wmt_hdr *hdr;
 97	int err;
 98
 99	/* Send the WMT command and wait until the WMT event returns */
100	hlen = sizeof(*hdr) + wmt_params->dlen;
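	/* The WMT command travels as the parameters of vendor HCI command
	 * 0xfc6f; the HCI command parameter length field is a single byte,
	 * hence the 255-byte limit on header plus payload.
	 */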
101	if (hlen > 255) {
102		err = -EINVAL;
103		goto err_free_skb;
104	}
105
106	wc = kzalloc(hlen, GFP_KERNEL);
107	if (!wc) {
108		err = -ENOMEM;
109		goto err_free_skb;
110	}
111
112	hdr = &wc->hdr;
113	hdr->dir = 1;
114	hdr->op = wmt_params->op;
115	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
116	hdr->flag = wmt_params->flag;
117	memcpy(wc->data, wmt_params->data, wmt_params->dlen);
118
119	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
120
121	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
122	if (err < 0) {
123		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
124		goto err_free_wc;
125	}
126
127	/* The vendor specific WMT commands are all answered by a vendor
128	 * specific event and will not have the Command Status or Command
129	 * Complete as with usual HCI command flow control.
130	 *
131	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
132	 * state to be cleared. The driver specific event receive routine
133	 * will clear that state and with that indicate completion of the
134	 * WMT command.
135	 */
136	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
137				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
138	if (err == -EINTR) {
139		bt_dev_err(hdev, "Execution of wmt command interrupted");
140		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
141		goto err_free_wc;
142	}
143
144	if (err) {
145		bt_dev_err(hdev, "Execution of wmt command timed out");
146		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
147		err = -ETIMEDOUT;
148		goto err_free_wc;
149	}
150
151	/* Parse and handle the return WMT event */
152	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
153	if (wmt_evt->whdr.op != hdr->op) {
154		bt_dev_err(hdev, "Wrong op received %d expected %d",
155			   wmt_evt->whdr.op, hdr->op);
156		err = -EIO;
157		goto err_free_wc;
158	}
159
160	switch (wmt_evt->whdr.op) {
161	case BTMTK_WMT_SEMAPHORE:
162		if (wmt_evt->whdr.flag == 2)
163			status = BTMTK_WMT_PATCH_UNDONE;
164		else
165			status = BTMTK_WMT_PATCH_DONE;
166		break;
167	case BTMTK_WMT_FUNC_CTRL:
168		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
169		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
170			status = BTMTK_WMT_ON_DONE;
171		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
172			status = BTMTK_WMT_ON_PROGRESS;
173		else
174			status = BTMTK_WMT_ON_UNDONE;
175		break;
176	}
177
178	if (wmt_params->status)
179		*wmt_params->status = status;
180
181err_free_wc:
182	kfree(wc);
183err_free_skb:
184	kfree_skb(bdev->evt_skb);
185	bdev->evt_skb = NULL;
186
187	return err;
188}
189
190static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
191{
192	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
193	struct hci_event_hdr *hdr = (void *)skb->data;
194	int err;
195
 196	/* When someone is waiting for the WMT event, clone the skb so the
 197	 * waiter can process the event from the clone.
198	 */
199	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
200		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
201		if (!bdev->evt_skb) {
202			err = -ENOMEM;
203			goto err_out;
204		}
205	}
206
207	err = hci_recv_frame(hdev, skb);
208	if (err < 0)
209		goto err_free_skb;
210
211	if (hdr->evt == HCI_EV_WMT) {
212		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
213				       &bdev->tx_state)) {
214			/* Barrier to sync with other CPUs */
215			smp_mb__after_atomic();
216			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
217		}
218	}
219
220	return 0;
221
222err_free_skb:
223	kfree_skb(bdev->evt_skb);
224	bdev->evt_skb = NULL;
225
226err_out:
227	return err;
228}
229
230static const struct h4_recv_pkt mtk_recv_pkts[] = {
231	{ H4_RECV_ACL,      .recv = hci_recv_frame },
232	{ H4_RECV_SCO,      .recv = hci_recv_frame },
233	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
234};
235
236static void btmtkuart_tx_work(struct work_struct *work)
237{
238	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
239						   tx_work);
240	struct serdev_device *serdev = bdev->serdev;
241	struct hci_dev *hdev = bdev->hdev;
242
243	while (1) {
244		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
245
246		while (1) {
247			struct sk_buff *skb = skb_dequeue(&bdev->txq);
248			int len;
249
250			if (!skb)
251				break;
252
253			len = serdev_device_write_buf(serdev, skb->data,
254						      skb->len);
255			hdev->stat.byte_tx += len;
256
257			skb_pull(skb, len);
258			if (skb->len > 0) {
259				skb_queue_head(&bdev->txq, skb);
260				break;
261			}
262
263			switch (hci_skb_pkt_type(skb)) {
264			case HCI_COMMAND_PKT:
265				hdev->stat.cmd_tx++;
266				break;
267			case HCI_ACLDATA_PKT:
268				hdev->stat.acl_tx++;
269				break;
270			case HCI_SCODATA_PKT:
271				hdev->stat.sco_tx++;
272				break;
273			}
274
275			kfree_skb(skb);
276		}
277
278		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
279			break;
280	}
281
282	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
283}
284
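/*
 * Kick the TX path: the ACTIVE bit marks that the worker is running (or
 * about to run); if it was already set, the WAKEUP bit asks the running
 * worker to make one more pass so freshly queued skbs are not missed.
 */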
285static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
286{
287	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
288		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
289
290	schedule_work(&bdev->tx_work);
291}
292
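/*
 * Gather the 6 bytes framing an H:4 payload into stp_pad and return a
 * pointer to the payload H4 can consume. When stp_cursor starts from 0,
 * those 6 bytes are the 2-byte trailer of the previous STP packet plus the
 * 4-byte header of the next one; after open/flush/resync stp_cursor starts
 * at 2, so only a bare 4-byte header is collected.
 */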
293static const unsigned char *
294mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
295	      int *sz_h4)
296{
297	struct mtk_stp_hdr *shdr;
298
299	/* The cursor is reset when all the data of STP is consumed out */
300	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
301		bdev->stp_cursor = 0;
302
303	/* Filling pad until all STP info is obtained */
304	while (bdev->stp_cursor < 6 && count > 0) {
305		bdev->stp_pad[bdev->stp_cursor] = *data;
306		bdev->stp_cursor++;
307		data++;
308		count--;
309	}
310
311	/* Retrieve STP info and have a sanity check */
312	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
313		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
314		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
315
316		/* Resync STP when unexpected data is being read */
317		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
318			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
319				   shdr->prefix, bdev->stp_dlen);
320			bdev->stp_cursor = 2;
321			bdev->stp_dlen = 0;
322		}
323	}
324
 325	/* Quit directly when there is no data left for H4 to process */
326	if (count <= 0)
327		return NULL;
328
 329	/* Translate to the amount of data the H4 engine can handle so far */
330	*sz_h4 = min_t(int, count, bdev->stp_dlen);
331
332	/* Update the remaining size of STP packet */
333	bdev->stp_dlen -= *sz_h4;
334
335	/* Data points to STP payload which can be handled by H4 */
336	return data;
337}
338
339static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
340{
341	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
342	const unsigned char *p_left = data, *p_h4;
343	int sz_left = count, sz_h4, adv;
344	int err;
345
346	while (sz_left > 0) {
 347		/*  The serial data received from the MT7622 BT controller
 348		 *  is always wrapped with an STP header and trailer.
 349		 *
 350		 *  A full STP packet looks like
 351		 *   -----------------------------------
 352		 *  | STP header  |  H:4   | STP trailer |
 353		 *   -----------------------------------
 354		 *  but it is not guaranteed to carry a complete H:4 packet:
 355		 *  a single H:4 packet may be fragmented across several STP
 356		 *  packets, so an STP header does not necessarily start a new
 357		 *  H:4 frame. The length recorded in the STP header only says
 358		 *  how much H:4 payload this particular STP packet carries
 359		 *  for the H:4 engine to consume.
 360		 */
361
362		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
363		if (!p_h4)
364			break;
365
366		adv = p_h4 - p_left;
367		sz_left -= adv;
368		p_left += adv;
369
370		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
371					   sz_h4, mtk_recv_pkts,
372					   ARRAY_SIZE(mtk_recv_pkts));
373		if (IS_ERR(bdev->rx_skb)) {
374			err = PTR_ERR(bdev->rx_skb);
375			bt_dev_err(bdev->hdev,
376				   "Frame reassembly failed (%d)", err);
377			bdev->rx_skb = NULL;
378			return;
379		}
380
381		sz_left -= sz_h4;
382		p_left += sz_h4;
383	}
384}
385
386static ssize_t btmtkuart_receive_buf(struct serdev_device *serdev,
387				     const u8 *data, size_t count)
388{
389	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
390
391	btmtkuart_recv(bdev->hdev, data, count);
392
393	bdev->hdev->stat.byte_rx += count;
394
395	return count;
396}
397
398static void btmtkuart_write_wakeup(struct serdev_device *serdev)
399{
400	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
401
402	btmtkuart_tx_wakeup(bdev);
403}
404
405static const struct serdev_device_ops btmtkuart_client_ops = {
406	.receive_buf = btmtkuart_receive_buf,
407	.write_wakeup = btmtkuart_write_wakeup,
408};
409
410static int btmtkuart_open(struct hci_dev *hdev)
411{
412	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
413	struct device *dev;
414	int err;
415
416	err = serdev_device_open(bdev->serdev);
417	if (err) {
418		bt_dev_err(hdev, "Unable to open UART device %s",
419			   dev_name(&bdev->serdev->dev));
420		goto err_open;
421	}
422
423	if (btmtkuart_is_standalone(bdev)) {
424		if (bdev->curr_speed != bdev->desired_speed)
425			err = serdev_device_set_baudrate(bdev->serdev,
426							 115200);
427		else
428			err = serdev_device_set_baudrate(bdev->serdev,
429							 bdev->desired_speed);
430
431		if (err < 0) {
432			bt_dev_err(hdev, "Unable to set baudrate UART device %s",
433				   dev_name(&bdev->serdev->dev));
434			goto  err_serdev_close;
435		}
436
437		serdev_device_set_flow_control(bdev->serdev, false);
438	}
439
440	bdev->stp_cursor = 2;
441	bdev->stp_dlen = 0;
442
443	dev = &bdev->serdev->dev;
444
445	/* Enable the power domain and clock the device requires */
446	pm_runtime_enable(dev);
447	err = pm_runtime_resume_and_get(dev);
448	if (err < 0)
449		goto err_disable_rpm;
450
451	err = clk_prepare_enable(bdev->clk);
452	if (err < 0)
453		goto err_put_rpm;
454
455	return 0;
456
457err_put_rpm:
458	pm_runtime_put_sync(dev);
459err_disable_rpm:
460	pm_runtime_disable(dev);
461err_serdev_close:
462	serdev_device_close(bdev->serdev);
463err_open:
464	return err;
465}
466
467static int btmtkuart_close(struct hci_dev *hdev)
468{
469	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
470	struct device *dev = &bdev->serdev->dev;
471
472	/* Shutdown the clock and power domain the device requires */
473	clk_disable_unprepare(bdev->clk);
474	pm_runtime_put_sync(dev);
475	pm_runtime_disable(dev);
476
477	serdev_device_close(bdev->serdev);
478
479	return 0;
480}
481
482static int btmtkuart_flush(struct hci_dev *hdev)
483{
484	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
485
486	/* Flush any pending characters */
487	serdev_device_write_flush(bdev->serdev);
488	skb_queue_purge(&bdev->txq);
489
490	cancel_work_sync(&bdev->tx_work);
491
492	kfree_skb(bdev->rx_skb);
493	bdev->rx_skb = NULL;
494
495	bdev->stp_cursor = 2;
496	bdev->stp_dlen = 0;
497
498	return 0;
499}
500
501static int btmtkuart_func_query(struct hci_dev *hdev)
502{
503	struct btmtk_hci_wmt_params wmt_params;
504	int status, err;
505	u8 param = 0;
506
507	/* Query whether the function is enabled */
508	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
509	wmt_params.flag = 4;
510	wmt_params.dlen = sizeof(param);
511	wmt_params.data = &param;
512	wmt_params.status = &status;
513
514	err = mtk_hci_wmt_sync(hdev, &wmt_params);
515	if (err < 0) {
516		bt_dev_err(hdev, "Failed to query function status (%d)", err);
517		return err;
518	}
519
520	return status;
521}
522
523static int btmtkuart_change_baudrate(struct hci_dev *hdev)
524{
525	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
526	struct btmtk_hci_wmt_params wmt_params;
527	__le32 baudrate;
528	u8 param;
529	int err;
530
 531	/* Tell the device to enter the probe state, indicating that the
 532	 * host is ready to change to a new baudrate.
533	 */
534	baudrate = cpu_to_le32(bdev->desired_speed);
535	wmt_params.op = BTMTK_WMT_HIF;
536	wmt_params.flag = 1;
537	wmt_params.dlen = 4;
538	wmt_params.data = &baudrate;
539	wmt_params.status = NULL;
540
541	err = mtk_hci_wmt_sync(hdev, &wmt_params);
542	if (err < 0) {
543		bt_dev_err(hdev, "Failed to device baudrate (%d)", err);
544		return err;
545	}
546
547	err = serdev_device_set_baudrate(bdev->serdev,
548					 bdev->desired_speed);
549	if (err < 0) {
550		bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
551			   err);
552		return err;
553	}
554
555	serdev_device_set_flow_control(bdev->serdev, false);
556
557	/* Send a dummy byte 0xff to activate the new baudrate */
558	param = 0xff;
559	err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param));
560	if (err < 0 || err < sizeof(param))
561		return err;
562
563	serdev_device_wait_until_sent(bdev->serdev, 0);
564
 565	/* Wait some time for the device to finish changing the baudrate */
566	usleep_range(20000, 22000);
567
568	/* Test the new baudrate */
569	wmt_params.op = BTMTK_WMT_TEST;
570	wmt_params.flag = 7;
571	wmt_params.dlen = 0;
572	wmt_params.data = NULL;
573	wmt_params.status = NULL;
574
575	err = mtk_hci_wmt_sync(hdev, &wmt_params);
576	if (err < 0) {
577		bt_dev_err(hdev, "Failed to test new baudrate (%d)",
578			   err);
579		return err;
580	}
581
582	bdev->curr_speed = bdev->desired_speed;
583
584	return 0;
585}
586
587static int btmtkuart_setup(struct hci_dev *hdev)
588{
589	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
590	struct btmtk_hci_wmt_params wmt_params;
591	ktime_t calltime, delta, rettime;
592	struct btmtk_tci_sleep tci_sleep;
593	unsigned long long duration;
594	struct sk_buff *skb;
595	int err, status;
596	u8 param = 0x1;
597
598	calltime = ktime_get();
599
600	/* Wakeup MCUSYS is required for certain devices before we start to
601	 * do any setups.
602	 */
603	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
604		wmt_params.op = BTMTK_WMT_WAKEUP;
605		wmt_params.flag = 3;
606		wmt_params.dlen = 0;
607		wmt_params.data = NULL;
608		wmt_params.status = NULL;
609
610		err = mtk_hci_wmt_sync(hdev, &wmt_params);
611		if (err < 0) {
612			bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
613			return err;
614		}
615
616		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
617	}
618
619	if (btmtkuart_is_standalone(bdev))
620		btmtkuart_change_baudrate(hdev);
621
 622	/* Query whether the firmware has already been downloaded */
623	wmt_params.op = BTMTK_WMT_SEMAPHORE;
624	wmt_params.flag = 1;
625	wmt_params.dlen = 0;
626	wmt_params.data = NULL;
627	wmt_params.status = &status;
628
629	err = mtk_hci_wmt_sync(hdev, &wmt_params);
630	if (err < 0) {
631		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
632		return err;
633	}
634
635	if (status == BTMTK_WMT_PATCH_DONE) {
636		bt_dev_info(hdev, "Firmware already downloaded");
637		goto ignore_setup_fw;
638	}
639
640	/* Setup a firmware which the device definitely requires */
641	err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync);
642	if (err < 0)
643		return err;
644
645ignore_setup_fw:
646	/* Query whether the device is already enabled */
647	err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
648				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
649				 2000, 5000000);
650	/* -ETIMEDOUT happens */
651	if (err < 0)
652		return err;
653
 654	/* The other errors happen in btmtkuart_func_query */
655	if (status < 0)
656		return status;
657
658	if (status == BTMTK_WMT_ON_DONE) {
659		bt_dev_info(hdev, "function already on");
660		goto ignore_func_on;
661	}
662
663	/* Enable Bluetooth protocol */
664	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
665	wmt_params.flag = 0;
666	wmt_params.dlen = sizeof(param);
667	wmt_params.data = &param;
668	wmt_params.status = NULL;
669
670	err = mtk_hci_wmt_sync(hdev, &wmt_params);
671	if (err < 0) {
672		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
673		return err;
674	}
675
676ignore_func_on:
677	/* Apply the low power environment setup */
678	tci_sleep.mode = 0x5;
679	tci_sleep.duration = cpu_to_le16(0x640);
680	tci_sleep.host_duration = cpu_to_le16(0x640);
681	tci_sleep.host_wakeup_pin = 0;
682	tci_sleep.time_compensation = 0;
683
684	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
685			     HCI_INIT_TIMEOUT);
686	if (IS_ERR(skb)) {
687		err = PTR_ERR(skb);
688		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
689		return err;
690	}
691	kfree_skb(skb);
692
693	rettime = ktime_get();
694	delta = ktime_sub(rettime, calltime);
695	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
696
697	bt_dev_info(hdev, "Device setup in %llu usecs", duration);
698
699	return 0;
700}
701
702static int btmtkuart_shutdown(struct hci_dev *hdev)
703{
704	struct btmtk_hci_wmt_params wmt_params;
705	u8 param = 0x0;
706	int err;
707
708	/* Disable the device */
709	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
710	wmt_params.flag = 0;
711	wmt_params.dlen = sizeof(param);
712	wmt_params.data = &param;
713	wmt_params.status = NULL;
714
715	err = mtk_hci_wmt_sync(hdev, &wmt_params);
716	if (err < 0) {
717		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
718		return err;
719	}
720
721	return 0;
722}
723
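/*
 * Queue one outgoing HCI packet: prepend the H:4 packet type byte and the
 * 4-byte STP header, append the 2-byte STP trailer, then let tx_work push
 * the frame out through the serdev.
 */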
724static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
725{
726	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
727	struct mtk_stp_hdr *shdr;
728	int err, dlen, type = 0;
729
730	/* Prepend skb with frame type */
731	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
732
 733	/* Make sure that there is enough room for the STP header and trailer */
734	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
735	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
736		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
737				       GFP_ATOMIC);
738		if (err < 0)
739			return err;
740	}
741
742	/* Add the STP header */
743	dlen = skb->len;
744	shdr = skb_push(skb, sizeof(*shdr));
745	shdr->prefix = 0x80;
746	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
747	shdr->cs = 0;		/* MT7622 doesn't care about checksum value */
748
749	/* Add the STP trailer */
750	skb_put_zero(skb, MTK_STP_TLR_SIZE);
751
752	skb_queue_tail(&bdev->txq, skb);
753
754	btmtkuart_tx_wakeup(bdev);
755	return 0;
756}
757
758static int btmtkuart_parse_dt(struct serdev_device *serdev)
759{
760	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
761	struct device_node *node = serdev->dev.of_node;
762	u32 speed = 921600;
763	int err;
764
765	if (btmtkuart_is_standalone(bdev)) {
766		of_property_read_u32(node, "current-speed", &speed);
767
768		bdev->desired_speed = speed;
769
770		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
771		if (IS_ERR(bdev->vcc)) {
772			err = PTR_ERR(bdev->vcc);
773			return err;
774		}
775
776		bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
777		if (IS_ERR(bdev->osc)) {
778			err = PTR_ERR(bdev->osc);
779			return err;
780		}
781
782		bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
783						     GPIOD_OUT_LOW);
784		if (IS_ERR(bdev->boot)) {
785			err = PTR_ERR(bdev->boot);
786			return err;
787		}
788
789		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
790		if (IS_ERR(bdev->pinctrl)) {
791			err = PTR_ERR(bdev->pinctrl);
792			return err;
793		}
794
795		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
796						       "default");
797		if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
798			err = PTR_ERR(bdev->pins_boot);
799			dev_err(&serdev->dev,
800				"Should assign RXD to LOW at boot stage\n");
801			return err;
802		}
803
804		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
805							  "runtime");
806		if (IS_ERR(bdev->pins_runtime)) {
807			err = PTR_ERR(bdev->pins_runtime);
808			return err;
809		}
810
811		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
812						      GPIOD_OUT_LOW);
813		if (IS_ERR(bdev->reset)) {
814			err = PTR_ERR(bdev->reset);
815			return err;
816		}
817	} else if (btmtkuart_is_builtin_soc(bdev)) {
818		bdev->clk = devm_clk_get(&serdev->dev, "ref");
819		if (IS_ERR(bdev->clk))
820			return PTR_ERR(bdev->clk);
821	}
822
823	return 0;
824}
825
826static int btmtkuart_probe(struct serdev_device *serdev)
827{
828	struct btmtkuart_dev *bdev;
829	struct hci_dev *hdev;
830	int err;
831
832	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
833	if (!bdev)
834		return -ENOMEM;
835
836	bdev->data = of_device_get_match_data(&serdev->dev);
837	if (!bdev->data)
838		return -ENODEV;
839
840	bdev->serdev = serdev;
841	serdev_device_set_drvdata(serdev, bdev);
842
843	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
844
845	err = btmtkuart_parse_dt(serdev);
846	if (err < 0)
847		return err;
848
849	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
850	skb_queue_head_init(&bdev->txq);
851
852	/* Initialize and register HCI device */
853	hdev = hci_alloc_dev();
854	if (!hdev) {
855		dev_err(&serdev->dev, "Can't allocate HCI device\n");
856		return -ENOMEM;
857	}
858
859	bdev->hdev = hdev;
860
861	hdev->bus = HCI_UART;
862	hci_set_drvdata(hdev, bdev);
863
864	hdev->open     = btmtkuart_open;
865	hdev->close    = btmtkuart_close;
866	hdev->flush    = btmtkuart_flush;
867	hdev->setup    = btmtkuart_setup;
868	hdev->shutdown = btmtkuart_shutdown;
869	hdev->send     = btmtkuart_send_frame;
870	hdev->set_bdaddr = btmtk_set_bdaddr;
871	SET_HCIDEV_DEV(hdev, &serdev->dev);
872
873	hdev->manufacturer = 70;
874	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
875
876	if (btmtkuart_is_standalone(bdev)) {
877		err = clk_prepare_enable(bdev->osc);
878		if (err < 0)
879			goto err_hci_free_dev;
880
881		if (bdev->boot) {
882			gpiod_set_value_cansleep(bdev->boot, 1);
883		} else {
 884			/* Switch to the specific pin state that the boot stage
 885			 * requires.
886			 */
887			pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
888		}
889
890		/* Power on */
891		err = regulator_enable(bdev->vcc);
892		if (err < 0)
893			goto err_clk_disable_unprepare;
894
 895		/* Reset via the reset GPIO if it is available; otherwise the
 896		 * board-level design must guarantee the reset.
897		 */
898		if (bdev->reset) {
899			gpiod_set_value_cansleep(bdev->reset, 1);
900			usleep_range(1000, 2000);
901			gpiod_set_value_cansleep(bdev->reset, 0);
902		}
903
904		/* Wait some time until device got ready and switch to the pin
905		 * mode the device requires for UART transfers.
906		 */
907		msleep(50);
908
909		if (bdev->boot)
910			devm_gpiod_put(&serdev->dev, bdev->boot);
911
912		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
913
 914		/* A standalone device doesn't depend on a power domain on the
 915		 * SoC, so mark it as having no PM callbacks.
916		 */
917		pm_runtime_no_callbacks(&serdev->dev);
918
919		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
920	}
921
922	err = hci_register_dev(hdev);
923	if (err < 0) {
924		dev_err(&serdev->dev, "Can't register HCI device\n");
925		goto err_regulator_disable;
926	}
927
928	return 0;
929
930err_regulator_disable:
931	if (btmtkuart_is_standalone(bdev))
932		regulator_disable(bdev->vcc);
933err_clk_disable_unprepare:
934	if (btmtkuart_is_standalone(bdev))
935		clk_disable_unprepare(bdev->osc);
936err_hci_free_dev:
937	hci_free_dev(hdev);
938
939	return err;
940}
941
942static void btmtkuart_remove(struct serdev_device *serdev)
943{
944	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
945	struct hci_dev *hdev = bdev->hdev;
946
947	if (btmtkuart_is_standalone(bdev)) {
948		regulator_disable(bdev->vcc);
949		clk_disable_unprepare(bdev->osc);
950	}
951
952	hci_unregister_dev(hdev);
953	hci_free_dev(hdev);
954}
955
956static const struct btmtkuart_data mt7622_data __maybe_unused = {
957	.fwname = FIRMWARE_MT7622,
958};
959
960static const struct btmtkuart_data mt7663_data __maybe_unused = {
961	.flags = BTMTKUART_FLAG_STANDALONE_HW,
962	.fwname = FIRMWARE_MT7663,
963};
964
965static const struct btmtkuart_data mt7668_data __maybe_unused = {
966	.flags = BTMTKUART_FLAG_STANDALONE_HW,
967	.fwname = FIRMWARE_MT7668,
968};
969
970#ifdef CONFIG_OF
971static const struct of_device_id mtk_of_match_table[] = {
972	{ .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
973	{ .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
974	{ .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
975	{ }
976};
977MODULE_DEVICE_TABLE(of, mtk_of_match_table);
978#endif
979
980static struct serdev_device_driver btmtkuart_driver = {
981	.probe = btmtkuart_probe,
982	.remove = btmtkuart_remove,
983	.driver = {
984		.name = "btmtkuart",
985		.of_match_table = of_match_ptr(mtk_of_match_table),
986	},
987};
988
989module_serdev_device_driver(btmtkuart_driver);
990
991MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
992MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
993MODULE_VERSION(VERSION);
994MODULE_LICENSE("GPL");
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2// Copyright (c) 2018 MediaTek Inc.
   3
   4/*
   5 * Bluetooth support for MediaTek serial devices
   6 *
   7 * Author: Sean Wang <sean.wang@mediatek.com>
   8 *
   9 */
  10
  11#include <asm/unaligned.h>
  12#include <linux/atomic.h>
  13#include <linux/clk.h>
  14#include <linux/firmware.h>
  15#include <linux/gpio/consumer.h>
  16#include <linux/iopoll.h>
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/of.h>
  20#include <linux/of_device.h>
  21#include <linux/pinctrl/consumer.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/regulator/consumer.h>
  24#include <linux/serdev.h>
  25#include <linux/skbuff.h>
  26
  27#include <net/bluetooth/bluetooth.h>
  28#include <net/bluetooth/hci_core.h>
  29
  30#include "h4_recv.h"
  31
  32#define VERSION "0.2"
  33
  34#define FIRMWARE_MT7622		"mediatek/mt7622pr2h.bin"
  35#define FIRMWARE_MT7663		"mediatek/mt7663pr2h.bin"
  36#define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"
  37
  38#define MTK_STP_TLR_SIZE	2
  39
  40#define BTMTKUART_TX_STATE_ACTIVE	1
  41#define BTMTKUART_TX_STATE_WAKEUP	2
  42#define BTMTKUART_TX_WAIT_VND_EVT	3
  43#define BTMTKUART_REQUIRED_WAKEUP	4
  44
  45#define BTMTKUART_FLAG_STANDALONE_HW	 BIT(0)
  46
  47enum {
  48	MTK_WMT_PATCH_DWNLD = 0x1,
  49	MTK_WMT_TEST = 0x2,
  50	MTK_WMT_WAKEUP = 0x3,
  51	MTK_WMT_HIF = 0x4,
  52	MTK_WMT_FUNC_CTRL = 0x6,
  53	MTK_WMT_RST = 0x7,
  54	MTK_WMT_SEMAPHORE = 0x17,
  55};
  56
  57enum {
  58	BTMTK_WMT_INVALID,
  59	BTMTK_WMT_PATCH_UNDONE,
  60	BTMTK_WMT_PATCH_DONE,
  61	BTMTK_WMT_ON_UNDONE,
  62	BTMTK_WMT_ON_DONE,
  63	BTMTK_WMT_ON_PROGRESS,
  64};
  65
  66struct mtk_stp_hdr {
  67	u8	prefix;
  68	__be16	dlen;
  69	u8	cs;
  70} __packed;
  71
  72struct btmtkuart_data {
  73	unsigned int flags;
  74	const char *fwname;
  75};
  76
  77struct mtk_wmt_hdr {
  78	u8	dir;
  79	u8	op;
  80	__le16	dlen;
  81	u8	flag;
  82} __packed;
  83
  84struct mtk_hci_wmt_cmd {
  85	struct mtk_wmt_hdr hdr;
  86	u8 data[256];
  87} __packed;
  88
  89struct btmtk_hci_wmt_evt {
  90	struct hci_event_hdr hhdr;
  91	struct mtk_wmt_hdr whdr;
  92} __packed;
  93
  94struct btmtk_hci_wmt_evt_funcc {
  95	struct btmtk_hci_wmt_evt hwhdr;
  96	__be16 status;
  97} __packed;
  98
  99struct btmtk_tci_sleep {
 100	u8 mode;
 101	__le16 duration;
 102	__le16 host_duration;
 103	u8 host_wakeup_pin;
 104	u8 time_compensation;
 105} __packed;
 106
 107struct btmtk_hci_wmt_params {
 108	u8 op;
 109	u8 flag;
 110	u16 dlen;
 111	const void *data;
 112	u32 *status;
 113};
 114
 115struct btmtkuart_dev {
 116	struct hci_dev *hdev;
 117	struct serdev_device *serdev;
 118
 119	struct clk *clk;
 120	struct clk *osc;
 121	struct regulator *vcc;
 122	struct gpio_desc *reset;
 123	struct gpio_desc *boot;
 124	struct pinctrl *pinctrl;
 125	struct pinctrl_state *pins_runtime;
 126	struct pinctrl_state *pins_boot;
 127	speed_t	desired_speed;
 128	speed_t	curr_speed;
 129
 130	struct work_struct tx_work;
 131	unsigned long tx_state;
 132	struct sk_buff_head txq;
 133
 134	struct sk_buff *rx_skb;
 135	struct sk_buff *evt_skb;
 136
 137	u8	stp_pad[6];
 138	u8	stp_cursor;
 139	u16	stp_dlen;
 140
 141	const struct btmtkuart_data *data;
 142};
 143
 144#define btmtkuart_is_standalone(bdev)	\
 145	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
 146#define btmtkuart_is_builtin_soc(bdev)	\
 147	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
 148
 149static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 150			    struct btmtk_hci_wmt_params *wmt_params)
 151{
 152	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 153	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
 154	u32 hlen, status = BTMTK_WMT_INVALID;
 155	struct btmtk_hci_wmt_evt *wmt_evt;
 156	struct mtk_hci_wmt_cmd wc;
 157	struct mtk_wmt_hdr *hdr;
 158	int err;
 159
 160	hlen = sizeof(*hdr) + wmt_params->dlen;
 161	if (hlen > 255)
 162		return -EINVAL;
 163
 164	hdr = (struct mtk_wmt_hdr *)&wc;
 165	hdr->dir = 1;
 166	hdr->op = wmt_params->op;
 167	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
 168	hdr->flag = wmt_params->flag;
 169	memcpy(wc.data, wmt_params->data, wmt_params->dlen);
 170
 171	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 172
 173	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
 174	if (err < 0) {
 175		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 176		return err;
 177	}
 178
 179	/* The vendor specific WMT commands are all answered by a vendor
 180	 * specific event and will not have the Command Status or Command
 181	 * Complete as with usual HCI command flow control.
 182	 *
 183	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
 184	 * state to be cleared. The driver specific event receive routine
 185	 * will clear that state and with that indicate completion of the
 186	 * WMT command.
 187	 */
 188	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
 189				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
 190	if (err == -EINTR) {
 191		bt_dev_err(hdev, "Execution of wmt command interrupted");
 192		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 193		return err;
 194	}
 195
 196	if (err) {
 197		bt_dev_err(hdev, "Execution of wmt command timed out");
 198		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 199		return -ETIMEDOUT;
 200	}
 201
 202	/* Parse and handle the return WMT event */
 203	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
 204	if (wmt_evt->whdr.op != hdr->op) {
 205		bt_dev_err(hdev, "Wrong op received %d expected %d",
 206			   wmt_evt->whdr.op, hdr->op);
 207		err = -EIO;
 208		goto err_free_skb;
 209	}
 210
 211	switch (wmt_evt->whdr.op) {
 212	case MTK_WMT_SEMAPHORE:
 213		if (wmt_evt->whdr.flag == 2)
 214			status = BTMTK_WMT_PATCH_UNDONE;
 215		else
 216			status = BTMTK_WMT_PATCH_DONE;
 217		break;
 218	case MTK_WMT_FUNC_CTRL:
 219		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
 220		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
 221			status = BTMTK_WMT_ON_DONE;
 222		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
 223			status = BTMTK_WMT_ON_PROGRESS;
 224		else
 225			status = BTMTK_WMT_ON_UNDONE;
 226		break;
 227	}
 228
 229	if (wmt_params->status)
 230		*wmt_params->status = status;
 231
 232err_free_skb:
 233	kfree_skb(bdev->evt_skb);
 234	bdev->evt_skb = NULL;
 235
 236	return err;
 237}
 238
 239static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
 240{
 241	struct btmtk_hci_wmt_params wmt_params;
 242	const struct firmware *fw;
 243	const u8 *fw_ptr;
 244	size_t fw_size;
 245	int err, dlen;
 246	u8 flag;
 247
 248	err = request_firmware(&fw, fwname, &hdev->dev);
 249	if (err < 0) {
 250		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
 251		return err;
 252	}
 253
 254	fw_ptr = fw->data;
 255	fw_size = fw->size;
 256
  257	/* The patch header is 30 bytes and should be skipped */
 258	if (fw_size < 30) {
 259		err = -EINVAL;
 260		goto free_fw;
 261	}
 262
 263	fw_size -= 30;
 264	fw_ptr += 30;
 265	flag = 1;
 266
 267	wmt_params.op = MTK_WMT_PATCH_DWNLD;
 268	wmt_params.status = NULL;
 269
 270	while (fw_size > 0) {
 271		dlen = min_t(int, 250, fw_size);
 272
 273		/* Tell device the position in sequence */
 274		if (fw_size - dlen <= 0)
 275			flag = 3;
 276		else if (fw_size < fw->size - 30)
 277			flag = 2;
 278
 279		wmt_params.flag = flag;
 280		wmt_params.dlen = dlen;
 281		wmt_params.data = fw_ptr;
 282
 283		err = mtk_hci_wmt_sync(hdev, &wmt_params);
 284		if (err < 0) {
 285			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
 286				   err);
 287			goto free_fw;
 288		}
 289
 290		fw_size -= dlen;
 291		fw_ptr += dlen;
 292	}
 293
 294	wmt_params.op = MTK_WMT_RST;
 295	wmt_params.flag = 4;
 296	wmt_params.dlen = 0;
 297	wmt_params.data = NULL;
 298	wmt_params.status = NULL;
 299
  300	/* Activate the function the firmware provides */
 301	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 302	if (err < 0) {
 303		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
 304		goto free_fw;
 305	}
 306
  307	/* Wait a moment for firmware activation to complete */
 308	usleep_range(10000, 12000);
 309
 310free_fw:
 311	release_firmware(fw);
 312	return err;
 313}
 314
 315static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
 316{
 317	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 318	struct hci_event_hdr *hdr = (void *)skb->data;
 319	int err;
 320
  321	/* Fix up the vendor event id to 0xff (vendor specific) instead of
  322	 * 0xe4 so that events sent via the monitoring socket can be parsed
  323	 * properly.
 324	 */
 325	if (hdr->evt == 0xe4)
 326		hdr->evt = HCI_EV_VENDOR;
 327
  328	/* When someone is waiting for the WMT event, clone the skb so the
  329	 * waiter can process the event from the clone.
 330	 */
 331	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
 332		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
 333		if (!bdev->evt_skb) {
 334			err = -ENOMEM;
 335			goto err_out;
 336		}
 337	}
 338
 339	err = hci_recv_frame(hdev, skb);
 340	if (err < 0)
 341		goto err_free_skb;
 342
 343	if (hdr->evt == HCI_EV_VENDOR) {
 344		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
 345				       &bdev->tx_state)) {
 346			/* Barrier to sync with other CPUs */
 347			smp_mb__after_atomic();
 348			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
 349		}
 350	}
 351
 352	return 0;
 353
 354err_free_skb:
 355	kfree_skb(bdev->evt_skb);
 356	bdev->evt_skb = NULL;
 357
 358err_out:
 359	return err;
 360}
 361
 362static const struct h4_recv_pkt mtk_recv_pkts[] = {
 363	{ H4_RECV_ACL,      .recv = hci_recv_frame },
 364	{ H4_RECV_SCO,      .recv = hci_recv_frame },
 365	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
 366};
 367
 368static void btmtkuart_tx_work(struct work_struct *work)
 369{
 370	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
 371						   tx_work);
 372	struct serdev_device *serdev = bdev->serdev;
 373	struct hci_dev *hdev = bdev->hdev;
 374
 375	while (1) {
 376		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
 377
 378		while (1) {
 379			struct sk_buff *skb = skb_dequeue(&bdev->txq);
 380			int len;
 381
 382			if (!skb)
 383				break;
 384
 385			len = serdev_device_write_buf(serdev, skb->data,
 386						      skb->len);
 387			hdev->stat.byte_tx += len;
 388
 389			skb_pull(skb, len);
 390			if (skb->len > 0) {
 391				skb_queue_head(&bdev->txq, skb);
 392				break;
 393			}
 394
 395			switch (hci_skb_pkt_type(skb)) {
 396			case HCI_COMMAND_PKT:
 397				hdev->stat.cmd_tx++;
 398				break;
 399			case HCI_ACLDATA_PKT:
 400				hdev->stat.acl_tx++;
 401				break;
 402			case HCI_SCODATA_PKT:
 403				hdev->stat.sco_tx++;
 404				break;
 405			}
 406
 407			kfree_skb(skb);
 408		}
 409
 410		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
 411			break;
 412	}
 413
 414	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
 415}
 416
 417static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
 418{
 419	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
 420		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
 421
 422	schedule_work(&bdev->tx_work);
 423}
 424
 425static const unsigned char *
 426mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
 427	      int *sz_h4)
 428{
 429	struct mtk_stp_hdr *shdr;
 430
 431	/* The cursor is reset when all the data of STP is consumed out */
 432	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
 433		bdev->stp_cursor = 0;
 434
 435	/* Filling pad until all STP info is obtained */
 436	while (bdev->stp_cursor < 6 && count > 0) {
 437		bdev->stp_pad[bdev->stp_cursor] = *data;
 438		bdev->stp_cursor++;
 439		data++;
 440		count--;
 441	}
 442
 443	/* Retrieve STP info and have a sanity check */
 444	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
 445		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
 446		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
 447
 448		/* Resync STP when unexpected data is being read */
 449		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
 450			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
 451				   shdr->prefix, bdev->stp_dlen);
 452			bdev->stp_cursor = 2;
 453			bdev->stp_dlen = 0;
 454		}
 455	}
 456
  457	/* Quit directly when there is no data left for H4 to process */
 458	if (count <= 0)
 459		return NULL;
 460
  461	/* Translate to the amount of data the H4 engine can handle so far */
 462	*sz_h4 = min_t(int, count, bdev->stp_dlen);
 463
 464	/* Update the remaining size of STP packet */
 465	bdev->stp_dlen -= *sz_h4;
 466
 467	/* Data points to STP payload which can be handled by H4 */
 468	return data;
 469}
 470
 471static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
 472{
 473	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 474	const unsigned char *p_left = data, *p_h4;
 475	int sz_left = count, sz_h4, adv;
 476	int err;
 477
 478	while (sz_left > 0) {
  479		/*  The serial data received from the MT7622 BT controller
  480		 *  is always wrapped with an STP header and trailer.
  481		 *
  482		 *  A full STP packet looks like
  483		 *   -----------------------------------
  484		 *  | STP header  |  H:4   | STP trailer |
  485		 *   -----------------------------------
  486		 *  but it is not guaranteed to carry a complete H:4 packet:
  487		 *  a single H:4 packet may be fragmented across several STP
  488		 *  packets, so an STP header does not necessarily start a new
  489		 *  H:4 frame. The length recorded in the STP header only says
  490		 *  how much H:4 payload this particular STP packet carries
  491		 *  for the H:4 engine to consume.
  492		 */
 493
 494		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
 495		if (!p_h4)
 496			break;
 497
 498		adv = p_h4 - p_left;
 499		sz_left -= adv;
 500		p_left += adv;
 501
 502		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
 503					   sz_h4, mtk_recv_pkts,
 504					   ARRAY_SIZE(mtk_recv_pkts));
 505		if (IS_ERR(bdev->rx_skb)) {
 506			err = PTR_ERR(bdev->rx_skb);
 507			bt_dev_err(bdev->hdev,
 508				   "Frame reassembly failed (%d)", err);
 509			bdev->rx_skb = NULL;
 510			return err;
 511		}
 512
 513		sz_left -= sz_h4;
 514		p_left += sz_h4;
 515	}
 516
 517	return 0;
 518}
 519
 520static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
 521				 size_t count)
 522{
 523	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
 524	int err;
 525
 526	err = btmtkuart_recv(bdev->hdev, data, count);
 527	if (err < 0)
 528		return err;
 529
 530	bdev->hdev->stat.byte_rx += count;
 531
 532	return count;
 533}
 534
 535static void btmtkuart_write_wakeup(struct serdev_device *serdev)
 536{
 537	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
 538
 539	btmtkuart_tx_wakeup(bdev);
 540}
 541
 542static const struct serdev_device_ops btmtkuart_client_ops = {
 543	.receive_buf = btmtkuart_receive_buf,
 544	.write_wakeup = btmtkuart_write_wakeup,
 545};
 546
 547static int btmtkuart_open(struct hci_dev *hdev)
 548{
 549	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 550	struct device *dev;
 551	int err;
 552
 553	err = serdev_device_open(bdev->serdev);
 554	if (err) {
 555		bt_dev_err(hdev, "Unable to open UART device %s",
 556			   dev_name(&bdev->serdev->dev));
 557		goto err_open;
 558	}
 559
 560	if (btmtkuart_is_standalone(bdev)) {
 561		if (bdev->curr_speed != bdev->desired_speed)
 562			err = serdev_device_set_baudrate(bdev->serdev,
 563							 115200);
 564		else
 565			err = serdev_device_set_baudrate(bdev->serdev,
 566							 bdev->desired_speed);
 567
 568		if (err < 0) {
 569			bt_dev_err(hdev, "Unable to set baudrate UART device %s",
 570				   dev_name(&bdev->serdev->dev));
 571			goto  err_serdev_close;
 572		}
 573
 574		serdev_device_set_flow_control(bdev->serdev, false);
 575	}
 576
 577	bdev->stp_cursor = 2;
 578	bdev->stp_dlen = 0;
 579
 580	dev = &bdev->serdev->dev;
 581
 582	/* Enable the power domain and clock the device requires */
 583	pm_runtime_enable(dev);
 584	err = pm_runtime_get_sync(dev);
 585	if (err < 0) {
 586		pm_runtime_put_noidle(dev);
 587		goto err_disable_rpm;
 588	}
 589
 590	err = clk_prepare_enable(bdev->clk);
 591	if (err < 0)
 592		goto err_put_rpm;
 593
 594	return 0;
 595
 596err_put_rpm:
 597	pm_runtime_put_sync(dev);
 598err_disable_rpm:
 599	pm_runtime_disable(dev);
 600err_serdev_close:
 601	serdev_device_close(bdev->serdev);
 602err_open:
 603	return err;
 604}
 605
 606static int btmtkuart_close(struct hci_dev *hdev)
 607{
 608	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 609	struct device *dev = &bdev->serdev->dev;
 610
 611	/* Shutdown the clock and power domain the device requires */
 612	clk_disable_unprepare(bdev->clk);
 613	pm_runtime_put_sync(dev);
 614	pm_runtime_disable(dev);
 615
 616	serdev_device_close(bdev->serdev);
 617
 618	return 0;
 619}
 620
 621static int btmtkuart_flush(struct hci_dev *hdev)
 622{
 623	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 624
 625	/* Flush any pending characters */
 626	serdev_device_write_flush(bdev->serdev);
 627	skb_queue_purge(&bdev->txq);
 628
 629	cancel_work_sync(&bdev->tx_work);
 630
 631	kfree_skb(bdev->rx_skb);
 632	bdev->rx_skb = NULL;
 633
 634	bdev->stp_cursor = 2;
 635	bdev->stp_dlen = 0;
 636
 637	return 0;
 638}
 639
 640static int btmtkuart_func_query(struct hci_dev *hdev)
 641{
 642	struct btmtk_hci_wmt_params wmt_params;
 643	int status, err;
 644	u8 param = 0;
 645
 646	/* Query whether the function is enabled */
 647	wmt_params.op = MTK_WMT_FUNC_CTRL;
 648	wmt_params.flag = 4;
 649	wmt_params.dlen = sizeof(param);
 650	wmt_params.data = &param;
 651	wmt_params.status = &status;
 652
 653	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 654	if (err < 0) {
 655		bt_dev_err(hdev, "Failed to query function status (%d)", err);
 656		return err;
 657	}
 658
 659	return status;
 660}
 661
 662static int btmtkuart_change_baudrate(struct hci_dev *hdev)
 663{
 664	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 665	struct btmtk_hci_wmt_params wmt_params;
 666	__le32 baudrate;
 667	u8 param;
 668	int err;
 669
  670	/* Tell the device to enter the probe state, indicating that the
  671	 * host is ready to change to a new baudrate.
 672	 */
 673	baudrate = cpu_to_le32(bdev->desired_speed);
 674	wmt_params.op = MTK_WMT_HIF;
 675	wmt_params.flag = 1;
 676	wmt_params.dlen = 4;
 677	wmt_params.data = &baudrate;
 678	wmt_params.status = NULL;
 679
 680	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 681	if (err < 0) {
 682		bt_dev_err(hdev, "Failed to device baudrate (%d)", err);
 683		return err;
 684	}
 685
 686	err = serdev_device_set_baudrate(bdev->serdev,
 687					 bdev->desired_speed);
 688	if (err < 0) {
 689		bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
 690			   err);
 691		return err;
 692	}
 693
 694	serdev_device_set_flow_control(bdev->serdev, false);
 695
 696	/* Send a dummy byte 0xff to activate the new baudrate */
 697	param = 0xff;
 698	err = serdev_device_write(bdev->serdev, &param, sizeof(param),
 699				  MAX_SCHEDULE_TIMEOUT);
 700	if (err < 0 || err < sizeof(param))
 701		return err;
 702
 703	serdev_device_wait_until_sent(bdev->serdev, 0);
 704
  705	/* Wait some time for the device to finish changing the baudrate */
 706	usleep_range(20000, 22000);
 707
 708	/* Test the new baudrate */
 709	wmt_params.op = MTK_WMT_TEST;
 710	wmt_params.flag = 7;
 711	wmt_params.dlen = 0;
 712	wmt_params.data = NULL;
 713	wmt_params.status = NULL;
 714
 715	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 716	if (err < 0) {
 717		bt_dev_err(hdev, "Failed to test new baudrate (%d)",
 718			   err);
 719		return err;
 720	}
 721
 722	bdev->curr_speed = bdev->desired_speed;
 723
 724	return 0;
 725}
 726
 727static int btmtkuart_setup(struct hci_dev *hdev)
 728{
 729	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 730	struct btmtk_hci_wmt_params wmt_params;
 731	ktime_t calltime, delta, rettime;
 732	struct btmtk_tci_sleep tci_sleep;
 733	unsigned long long duration;
 734	struct sk_buff *skb;
 735	int err, status;
 736	u8 param = 0x1;
 737
 738	calltime = ktime_get();
 739
 740	/* Wakeup MCUSYS is required for certain devices before we start to
 741	 * do any setups.
 742	 */
 743	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
 744		wmt_params.op = MTK_WMT_WAKEUP;
 745		wmt_params.flag = 3;
 746		wmt_params.dlen = 0;
 747		wmt_params.data = NULL;
 748		wmt_params.status = NULL;
 749
 750		err = mtk_hci_wmt_sync(hdev, &wmt_params);
 751		if (err < 0) {
 752			bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
 753			return err;
 754		}
 755
 756		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
 757	}
 758
 759	if (btmtkuart_is_standalone(bdev))
 760		btmtkuart_change_baudrate(hdev);
 761
  762	/* Query whether the firmware has already been downloaded */
 763	wmt_params.op = MTK_WMT_SEMAPHORE;
 764	wmt_params.flag = 1;
 765	wmt_params.dlen = 0;
 766	wmt_params.data = NULL;
 767	wmt_params.status = &status;
 768
 769	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 770	if (err < 0) {
 771		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
 772		return err;
 773	}
 774
 775	if (status == BTMTK_WMT_PATCH_DONE) {
 776		bt_dev_info(hdev, "Firmware already downloaded");
 777		goto ignore_setup_fw;
 778	}
 779
 780	/* Setup a firmware which the device definitely requires */
 781	err = mtk_setup_firmware(hdev, bdev->data->fwname);
 782	if (err < 0)
 783		return err;
 784
 785ignore_setup_fw:
 786	/* Query whether the device is already enabled */
 787	err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
 788				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
 789				 2000, 5000000);
 790	/* -ETIMEDOUT happens */
 791	if (err < 0)
 792		return err;
 793
  794	/* The other errors happen in btmtkuart_func_query */
 795	if (status < 0)
 796		return status;
 797
 798	if (status == BTMTK_WMT_ON_DONE) {
 799		bt_dev_info(hdev, "function already on");
 800		goto ignore_func_on;
 801	}
 802
 803	/* Enable Bluetooth protocol */
 804	wmt_params.op = MTK_WMT_FUNC_CTRL;
 805	wmt_params.flag = 0;
 806	wmt_params.dlen = sizeof(param);
 807	wmt_params.data = &param;
 808	wmt_params.status = NULL;
 809
 810	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 811	if (err < 0) {
 812		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
 813		return err;
 814	}
 815
 816ignore_func_on:
 817	/* Apply the low power environment setup */
 818	tci_sleep.mode = 0x5;
 819	tci_sleep.duration = cpu_to_le16(0x640);
 820	tci_sleep.host_duration = cpu_to_le16(0x640);
 821	tci_sleep.host_wakeup_pin = 0;
 822	tci_sleep.time_compensation = 0;
 823
 824	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
 825			     HCI_INIT_TIMEOUT);
 826	if (IS_ERR(skb)) {
 827		err = PTR_ERR(skb);
 828		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
 829		return err;
 830	}
 831	kfree_skb(skb);
 832
 833	rettime = ktime_get();
 834	delta = ktime_sub(rettime, calltime);
 835	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
 836
 837	bt_dev_info(hdev, "Device setup in %llu usecs", duration);
 838
 839	return 0;
 840}
 841
 842static int btmtkuart_shutdown(struct hci_dev *hdev)
 843{
 844	struct btmtk_hci_wmt_params wmt_params;
 845	u8 param = 0x0;
 846	int err;
 847
 848	/* Disable the device */
 849	wmt_params.op = MTK_WMT_FUNC_CTRL;
 850	wmt_params.flag = 0;
 851	wmt_params.dlen = sizeof(param);
 852	wmt_params.data = &param;
 853	wmt_params.status = NULL;
 854
 855	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 856	if (err < 0) {
 857		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
 858		return err;
 859	}
 860
 861	return 0;
 862}
 863
 864static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 865{
 866	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 867	struct mtk_stp_hdr *shdr;
 868	int err, dlen, type = 0;
 869
 870	/* Prepend skb with frame type */
 871	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
 872
  873	/* Make sure that there is enough room for the STP header and trailer */
 874	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
 875	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
 876		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
 877				       GFP_ATOMIC);
 878		if (err < 0)
 879			return err;
 880	}
 881
 882	/* Add the STP header */
 883	dlen = skb->len;
 884	shdr = skb_push(skb, sizeof(*shdr));
 885	shdr->prefix = 0x80;
 886	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
 887	shdr->cs = 0;		/* MT7622 doesn't care about checksum value */
 888
 889	/* Add the STP trailer */
 890	skb_put_zero(skb, MTK_STP_TLR_SIZE);
 891
 892	skb_queue_tail(&bdev->txq, skb);
 893
 894	btmtkuart_tx_wakeup(bdev);
 895	return 0;
 896}
 897
 898static int btmtkuart_parse_dt(struct serdev_device *serdev)
 899{
 900	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
 901	struct device_node *node = serdev->dev.of_node;
 902	u32 speed = 921600;
 903	int err;
 904
 905	if (btmtkuart_is_standalone(bdev)) {
 906		of_property_read_u32(node, "current-speed", &speed);
 907
 908		bdev->desired_speed = speed;
 909
 910		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
 911		if (IS_ERR(bdev->vcc)) {
 912			err = PTR_ERR(bdev->vcc);
 913			return err;
 914		}
 915
 916		bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
 917		if (IS_ERR(bdev->osc)) {
 918			err = PTR_ERR(bdev->osc);
 919			return err;
 920		}
 921
 922		bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
 923						     GPIOD_OUT_LOW);
 924		if (IS_ERR(bdev->boot)) {
 925			err = PTR_ERR(bdev->boot);
 926			return err;
 927		}
 928
 929		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
 930		if (IS_ERR(bdev->pinctrl)) {
 931			err = PTR_ERR(bdev->pinctrl);
 932			return err;
 933		}
 934
 935		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
 936						       "default");
 937		if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
 938			err = PTR_ERR(bdev->pins_boot);
 939			dev_err(&serdev->dev,
 940				"Should assign RXD to LOW at boot stage\n");
 941			return err;
 942		}
 943
 944		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
 945							  "runtime");
 946		if (IS_ERR(bdev->pins_runtime)) {
 947			err = PTR_ERR(bdev->pins_runtime);
 948			return err;
 949		}
 950
 951		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
 952						      GPIOD_OUT_LOW);
 953		if (IS_ERR(bdev->reset)) {
 954			err = PTR_ERR(bdev->reset);
 955			return err;
 956		}
 957	} else if (btmtkuart_is_builtin_soc(bdev)) {
 958		bdev->clk = devm_clk_get(&serdev->dev, "ref");
 959		if (IS_ERR(bdev->clk))
 960			return PTR_ERR(bdev->clk);
 961	}
 962
 963	return 0;
 964}
 965
 966static int btmtkuart_probe(struct serdev_device *serdev)
 967{
 968	struct btmtkuart_dev *bdev;
 969	struct hci_dev *hdev;
 970	int err;
 971
 972	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
 973	if (!bdev)
 974		return -ENOMEM;
 975
 976	bdev->data = of_device_get_match_data(&serdev->dev);
 977	if (!bdev->data)
 978		return -ENODEV;
 979
 980	bdev->serdev = serdev;
 981	serdev_device_set_drvdata(serdev, bdev);
 982
 983	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
 984
 985	err = btmtkuart_parse_dt(serdev);
 986	if (err < 0)
 987		return err;
 988
 989	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
 990	skb_queue_head_init(&bdev->txq);
 991
 992	/* Initialize and register HCI device */
 993	hdev = hci_alloc_dev();
 994	if (!hdev) {
 995		dev_err(&serdev->dev, "Can't allocate HCI device\n");
 996		return -ENOMEM;
 997	}
 998
 999	bdev->hdev = hdev;
1000
1001	hdev->bus = HCI_UART;
1002	hci_set_drvdata(hdev, bdev);
1003
1004	hdev->open     = btmtkuart_open;
1005	hdev->close    = btmtkuart_close;
1006	hdev->flush    = btmtkuart_flush;
1007	hdev->setup    = btmtkuart_setup;
1008	hdev->shutdown = btmtkuart_shutdown;
1009	hdev->send     = btmtkuart_send_frame;
1010	SET_HCIDEV_DEV(hdev, &serdev->dev);
1011
1012	hdev->manufacturer = 70;
1013	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
1014
1015	if (btmtkuart_is_standalone(bdev)) {
1016		err = clk_prepare_enable(bdev->osc);
1017		if (err < 0)
1018			return err;
1019
1020		if (bdev->boot) {
1021			gpiod_set_value_cansleep(bdev->boot, 1);
1022		} else {
 1023			/* Switch to the specific pin state that the boot stage
 1024			 * requires.
1025			 */
1026			pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
1027		}
1028
1029		/* Power on */
1030		err = regulator_enable(bdev->vcc);
1031		if (err < 0) {
1032			clk_disable_unprepare(bdev->osc);
1033			return err;
1034		}
1035
 1036		/* Reset via the reset GPIO if it is available; otherwise the
 1037		 * board-level design must guarantee the reset.
1038		 */
1039		if (bdev->reset) {
1040			gpiod_set_value_cansleep(bdev->reset, 1);
1041			usleep_range(1000, 2000);
1042			gpiod_set_value_cansleep(bdev->reset, 0);
1043		}
1044
1045		/* Wait some time until device got ready and switch to the pin
1046		 * mode the device requires for UART transfers.
1047		 */
1048		msleep(50);
1049
1050		if (bdev->boot)
1051			devm_gpiod_put(&serdev->dev, bdev->boot);
1052
1053		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
1054
 1055		/* A standalone device doesn't depend on a power domain on the
 1056		 * SoC, so mark it as having no PM callbacks.
1057		 */
1058		pm_runtime_no_callbacks(&serdev->dev);
1059
1060		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
1061	}
1062
1063	err = hci_register_dev(hdev);
1064	if (err < 0) {
1065		dev_err(&serdev->dev, "Can't register HCI device\n");
1066		hci_free_dev(hdev);
1067		goto err_regulator_disable;
1068	}
1069
1070	return 0;
1071
1072err_regulator_disable:
1073	if (btmtkuart_is_standalone(bdev))
1074		regulator_disable(bdev->vcc);
1075
1076	return err;
1077}
1078
1079static void btmtkuart_remove(struct serdev_device *serdev)
1080{
1081	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
1082	struct hci_dev *hdev = bdev->hdev;
1083
1084	if (btmtkuart_is_standalone(bdev)) {
1085		regulator_disable(bdev->vcc);
1086		clk_disable_unprepare(bdev->osc);
1087	}
1088
1089	hci_unregister_dev(hdev);
1090	hci_free_dev(hdev);
1091}
1092
1093static const struct btmtkuart_data mt7622_data = {
1094	.fwname = FIRMWARE_MT7622,
1095};
1096
1097static const struct btmtkuart_data mt7663_data = {
1098	.flags = BTMTKUART_FLAG_STANDALONE_HW,
1099	.fwname = FIRMWARE_MT7663,
1100};
1101
1102static const struct btmtkuart_data mt7668_data = {
1103	.flags = BTMTKUART_FLAG_STANDALONE_HW,
1104	.fwname = FIRMWARE_MT7668,
1105};
1106
1107#ifdef CONFIG_OF
1108static const struct of_device_id mtk_of_match_table[] = {
1109	{ .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
1110	{ .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
1111	{ .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
1112	{ }
1113};
1114MODULE_DEVICE_TABLE(of, mtk_of_match_table);
1115#endif
1116
1117static struct serdev_device_driver btmtkuart_driver = {
1118	.probe = btmtkuart_probe,
1119	.remove = btmtkuart_remove,
1120	.driver = {
1121		.name = "btmtkuart",
1122		.of_match_table = of_match_ptr(mtk_of_match_table),
1123	},
1124};
1125
1126module_serdev_device_driver(btmtkuart_driver);
1127
1128MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
1129MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
1130MODULE_VERSION(VERSION);
1131MODULE_LICENSE("GPL");
1132MODULE_FIRMWARE(FIRMWARE_MT7622);
1133MODULE_FIRMWARE(FIRMWARE_MT7663);
1134MODULE_FIRMWARE(FIRMWARE_MT7668);