drivers/bluetooth/hci_h5.c (Linux v4.17)
/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

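/*
 * Three-wire header layout (4 bytes), as read by the macros below:
 *   byte 0: bits 0-2 seq, bits 3-5 ack, bit 6 data-integrity check
 *           (CRC present), bit 7 reliable flag
 *   byte 1: bits 0-3 packet type, bits 4-7 low nibble of the 12-bit length
 *   byte 2: upper 8 bits of the 12-bit length
 *   byte 3: header checksum, chosen so the four header bytes sum to 0xff
 *           (see h5_rx_3wire_hdr() and h5_prepare_pkt())
 */
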
/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd
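
/*
 * SLIP framing: each packet is wrapped in 0xc0 delimiters; a 0xc0 byte
 * inside the packet is sent as the two-byte sequence 0xdb 0xdc and a
 * 0xdb byte as 0xdb 0xdd (see h5_slip_one_byte() and
 * h5_unslip_one_byte() below).
 */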

/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

struct h5 {
	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */
	struct hci_uart		*hu;		/* Parent HCI UART */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};

static void h5_reset_rx(struct h5 *h5);

static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}

static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree(h5);

	return 0;
}

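/*
 * Cull packets from the unack'ed queue that the peer's last ack number
 * (h5->rx_ack) confirms as delivered: walk backwards from the next
 * tx_seq to work out how many of the oldest entries can be freed.
 */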
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

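/*
 * Link establishment handshake: sync request/response is exchanged
 * first, then config request/response (whose payload carries the
 * sliding window size), after which the link becomes H5_ACTIVE.
 * The sleep/wakeup/woken messages drive the low-power handshake.
 */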
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}

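/*
 * The receive path is a chain of rx_func callbacks: h5_rx_delimiter
 * waits for a SLIP delimiter, h5_rx_pkt_start allocates the skb and
 * expects the 4-byte header, h5_rx_3wire_hdr validates the header and
 * expects the payload, and h5_rx_payload optionally hands off to
 * h5_rx_crc before the packet is completed.
 */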
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

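/*
 * Worked example (derived from the encoding below, for illustration):
 * the initial sync request is the link-control payload { 0x01, 0x7e }
 * sent unreliably while tx_ack is still 0, giving the header bytes
 * 00 2f 00 d0 (0x00 + 0x2f + 0x00 + 0xd0 == 0xff), so the SLIP-framed
 * bytes on the wire are:
 *
 *     c0 00 2f 00 d0 01 7e c0
 */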
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

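/*
 * Transmit priority: while sleeping only a wakeup request is sent;
 * otherwise unreliable packets go out first, then reliable packets as
 * long as the unack'ed queue stays within the sliding window, and
 * finally a bare ack frame if one is pending.
 */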
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}