1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42
43#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h>
45
46/* Handle HCI Event packets */
47
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On failure, tell the mgmt layer that stopping discovery failed (under
 * the device lock).  On success, clear the HCI_INQUIRY flag, move the
 * discovery state machine to DISCOVERY_STOPPED, complete the pending
 * request and re-check any queued connection attempts that were waiting
 * for the inquiry to finish.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}
71
72static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{
74 __u8 status = *((__u8 *) skb->data);
75
76 BT_DBG("%s status 0x%x", hdev->name, status);
77
78 if (status)
79 return;
80
81 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82}
83
84static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
85{
86 __u8 status = *((__u8 *) skb->data);
87
88 BT_DBG("%s status 0x%x", hdev->name, status);
89
90 if (status)
91 return;
92
93 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
94
95 hci_conn_check_pending(hdev);
96}
97
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to update locally; only emit a debug trace. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
102
103static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
104{
105 struct hci_rp_role_discovery *rp = (void *) skb->data;
106 struct hci_conn *conn;
107
108 BT_DBG("%s status 0x%x", hdev->name, rp->status);
109
110 if (rp->status)
111 return;
112
113 hci_dev_lock(hdev);
114
115 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
116 if (conn) {
117 if (rp->role)
118 conn->link_mode &= ~HCI_LM_MASTER;
119 else
120 conn->link_mode |= HCI_LM_MASTER;
121 }
122
123 hci_dev_unlock(hdev);
124}
125
126static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
127{
128 struct hci_rp_read_link_policy *rp = (void *) skb->data;
129 struct hci_conn *conn;
130
131 BT_DBG("%s status 0x%x", hdev->name, rp->status);
132
133 if (rp->status)
134 return;
135
136 hci_dev_lock(hdev);
137
138 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
139 if (conn)
140 conn->link_policy = __le16_to_cpu(rp->policy);
141
142 hci_dev_unlock(hdev);
143}
144
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY: on success,
 * mirror the policy value we actually sent into the connection object. */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command we sent; NULL if the
	 * outstanding command was not ours. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* the policy field follows the 2-byte handle in the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
168
169static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
170{
171 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
172
173 BT_DBG("%s status 0x%x", hdev->name, rp->status);
174
175 if (rp->status)
176 return;
177
178 hdev->link_policy = __le16_to_cpu(rp->policy);
179}
180
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY: on
 * success, remember the default policy that was sent, then complete
 * the pending request regardless of status. */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameters of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
197
/* Command Complete handler for HCI_OP_RESET.
 *
 * Clears the in-progress reset marker, completes the pending request,
 * and drops all non-persistent device flags plus any in-flight
 * discovery state, since a controller reset invalidates them.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
			BIT(HCI_PERIODIC_INQ));

	hdev->discovery.state = DISCOVERY_STOPPED;
}
214
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When mgmt is in control, the result (success or failure) is always
 * forwarded to it; otherwise the locally cached name is only updated
 * on success.  The pending request is completed in either case.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* The name we asked the controller to use */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
237
238static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239{
240 struct hci_rp_read_local_name *rp = (void *) skb->data;
241
242 BT_DBG("%s status 0x%x", hdev->name, rp->status);
243
244 if (rp->status)
245 return;
246
247 if (test_bit(HCI_SETUP, &hdev->dev_flags))
248 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249}
250
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, sync the HCI_AUTH flag with the parameter we sent.  The
 * mgmt layer is then notified (success or failure) and the pending
 * request completed.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameter byte of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
276
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE: on success,
 * sync the HCI_ENCRYPT flag with the mode we sent, then complete the
 * pending request. */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameter byte of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
299
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Re-derives HCI_ISCAN/HCI_PSCAN from the scan-enable value that was
 * actually sent and notifies mgmt of any discoverable/connectable
 * transitions.  On failure, mgmt is told the write failed and the
 * discoverable timeout is dropped.  The pending request is completed
 * in all cases.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameter byte of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Clear both scan bits first; re-set below according to 'param'
	 * while remembering the previous state for mgmt notifications. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Arm the delayed work that turns discoverable mode
		 * back off when a timeout was configured. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
348
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the
 * 3-byte class of device reported by the controller. */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Class of device is little-endian on the wire; print MSB first */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
363
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: on success,
 * mirror the class we sent into the local cache; the mgmt layer is
 * always told the outcome when it is in control. */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* The 3-byte class value we asked the controller to use */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
385
386static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
387{
388 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 __u16 setting;
390
391 BT_DBG("%s status 0x%x", hdev->name, rp->status);
392
393 if (rp->status)
394 return;
395
396 setting = __le16_to_cpu(rp->voice_setting);
397
398 if (hdev->voice_setting == setting)
399 return;
400
401 hdev->voice_setting = setting;
402
403 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
404
405 if (hdev->notify)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407}
408
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING: on success,
 * adopt the value we sent and notify the driver if it changed. */
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	/* The setting we asked the controller to use */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* Skip the notify callback when nothing actually changed */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
436
437static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
438{
439 __u8 status = *((__u8 *) skb->data);
440
441 BT_DBG("%s status 0x%x", hdev->name, status);
442
443 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
444}
445
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * When mgmt is in control, it is always told the result; otherwise the
 * HCI_SSP_ENABLED flag is synced with the mode we sent, on success only.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameter byte of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
	else if (!status) {
		if (*((u8 *) sent))
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
466
467static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
468{
469 if (hdev->features[6] & LMP_EXT_INQ)
470 return 2;
471
472 if (hdev->features[3] & LMP_RSSI_INQ)
473 return 1;
474
475 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
476 hdev->lmp_subver == 0x0757)
477 return 1;
478
479 if (hdev->manufacturer == 15) {
480 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
481 return 1;
482 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
483 return 1;
484 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
485 return 1;
486 }
487
488 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
489 hdev->lmp_subver == 0x1805)
490 return 1;
491
492 return 0;
493}
494
495static void hci_setup_inquiry_mode(struct hci_dev *hdev)
496{
497 u8 mode;
498
499 mode = hci_get_inquiry_mode(hdev);
500
501 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
502}
503
/* Build and send the HCI Set Event Mask for this controller, enabling
 * only the events that match its advertised LMP features. */
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
558
/* Run the feature-dependent part of BR/EDR controller initialization:
 * event mask, supported-commands query, SSP or EIR setup, inquiry mode,
 * TX power, extended features and link-level security.  AMP controllers
 * are skipped entirely. */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	/* Read Local Supported Commands only exists from 1.2 onwards */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
					sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data both
			 * locally and on the controller */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		/* Fetch extended features page 1 (host features) */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
			     &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			     &enable);
	}
}
604
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION: cache the
 * controller's version identifiers and, during the HCI_INIT phase,
 * kick off the rest of the feature-dependent setup. */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
					hdev->manufacturer,
					hdev->hci_ver, hdev->hci_rev);

	/* Only continue initialization while the device is being set up */
	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}
630
631static void hci_setup_link_policy(struct hci_dev *hdev)
632{
633 struct hci_cp_write_def_link_policy cp;
634 u16 link_policy = 0;
635
636 if (hdev->features[0] & LMP_RSWITCH)
637 link_policy |= HCI_LP_RSWITCH;
638 if (hdev->features[0] & LMP_HOLD)
639 link_policy |= HCI_LP_HOLD;
640 if (hdev->features[0] & LMP_SNIFF)
641 link_policy |= HCI_LP_SNIFF;
642 if (hdev->features[1] & LMP_PARK)
643 link_policy |= HCI_LP_PARK;
644
645 cp.policy = cpu_to_le16(link_policy);
646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
647}
648
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS: cache the
 * supported-commands bitmap and, during init, program the default link
 * policy when the Write Default Link Policy command is supported. */
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	/* commands[5] bit 4 == Write Default Link Policy Settings */
	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
666
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache the
 * LMP feature bitmap and widen the allowed ACL/SCO/eSCO packet types
 * accordingly. */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	/* 3-slot EDR eSCO enables both 2 Mbps and 3 Mbps 5-slot types */
	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}
721
/* Sync the controller's "LE Host Supported" setting with the host's
 * HCI_LE_ENABLED flag, sending the write only when the desired state
 * differs from what the controller already reports. */
static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 1;
		/* Simultaneous LE and BR/EDR, if the controller can do it */
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	/* Avoid a redundant write when the setting is already correct */
	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			     &cp);
}
737
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES: store
 * the requested feature page (0 = LMP features, 1 = host features)
 * and, during init on an LE-capable controller, sync LE host support. */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
763
764static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
765 struct sk_buff *skb)
766{
767 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
768
769 BT_DBG("%s status 0x%x", hdev->name, rp->status);
770
771 if (rp->status)
772 return;
773
774 hdev->flow_ctl_mode = rp->mode;
775
776 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
777}
778
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE: record the
 * controller's ACL/SCO MTUs and packet counts, and initialize the
 * outstanding-packet credit counters from them. */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO values; quirk overrides them */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}
805
806static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
807{
808 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
809
810 BT_DBG("%s status 0x%x", hdev->name, rp->status);
811
812 if (!rp->status)
813 bacpy(&hdev->bdaddr, &rp->bdaddr);
814
815 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
816}
817
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE: record the
 * block-based flow control parameters and seed the free-block counter. */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
					hdev->block_cnt, hdev->block_len);

	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}
839
840static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
841{
842 __u8 status = *((__u8 *) skb->data);
843
844 BT_DBG("%s status 0x%x", hdev->name, status);
845
846 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
847}
848
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache all
 * AMP controller capabilities reported by the controller, then complete
 * the pending request. */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}
872
873static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
874 struct sk_buff *skb)
875{
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
881}
882
883static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
884{
885 __u8 status = *((__u8 *) skb->data);
886
887 BT_DBG("%s status 0x%x", hdev->name, status);
888
889 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
890}
891
892static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
893 struct sk_buff *skb)
894{
895 __u8 status = *((__u8 *) skb->data);
896
897 BT_DBG("%s status 0x%x", hdev->name, status);
898
899 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
900}
901
902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
903 struct sk_buff *skb)
904{
905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
906
907 BT_DBG("%s status 0x%x", hdev->name, rp->status);
908
909 if (!rp->status)
910 hdev->inq_tx_power = rp->tx_power;
911
912 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
913}
914
915static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
916{
917 __u8 status = *((__u8 *) skb->data);
918
919 BT_DBG("%s status 0x%x", hdev->name, status);
920
921 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
922}
923
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always forwards the result to mgmt when it is in control.  On
 * success, additionally records the PIN length we sent in the matching
 * ACL connection (used later for link-key classification).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		goto unlock;

	/* Parameters of the command we originally sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
951
952static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
953{
954 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
955
956 BT_DBG("%s status 0x%x", hdev->name, rp->status);
957
958 hci_dev_lock(hdev);
959
960 if (test_bit(HCI_MGMT, &hdev->dev_flags))
961 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
962 rp->status);
963
964 hci_dev_unlock(hdev);
965}
966
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE: record the
 * LE ACL MTU and packet count and seed the LE credit counter. */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
986
987static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
988{
989 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
990
991 BT_DBG("%s status 0x%x", hdev->name, rp->status);
992
993 hci_dev_lock(hdev);
994
995 if (test_bit(HCI_MGMT, &hdev->dev_flags))
996 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
997 rp->status);
998
999 hci_dev_unlock(hdev);
1000}
1001
1002static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1003 struct sk_buff *skb)
1004{
1005 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1006
1007 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1008
1009 hci_dev_lock(hdev);
1010
1011 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1012 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1013 ACL_LINK, 0, rp->status);
1014
1015 hci_dev_unlock(hdev);
1016}
1017
1018static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1019{
1020 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1021
1022 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1023
1024 hci_dev_lock(hdev);
1025
1026 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1027 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1028 0, rp->status);
1029
1030 hci_dev_unlock(hdev);
1031}
1032
1033static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1034 struct sk_buff *skb)
1035{
1036 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1037
1038 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1039
1040 hci_dev_lock(hdev);
1041
1042 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1043 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1044 ACL_LINK, 0, rp->status);
1045
1046 hci_dev_unlock(hdev);
1047}
1048
1049static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1050 struct sk_buff *skb)
1051{
1052 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1053
1054 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1055
1056 hci_dev_lock(hdev);
1057 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1058 rp->randomizer, rp->status);
1059 hci_dev_unlock(hdev);
1060}
1061
1062static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1063{
1064 __u8 status = *((__u8 *) skb->data);
1065
1066 BT_DBG("%s status 0x%x", hdev->name, status);
1067
1068 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1069
1070 if (status) {
1071 hci_dev_lock(hdev);
1072 mgmt_start_discovery_failed(hdev, status);
1073 hci_dev_unlock(hdev);
1074 return;
1075 }
1076}
1077
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Drives the discovery state machine according to whether we asked to
 * start or stop LE scanning: sets/clears HCI_LE_SCAN and moves the
 * discovery state to FINDING or STOPPED, reporting failures to mgmt.
 * Stopping mid-interleaved-discovery hands control back to mgmt to
 * continue with the BR/EDR phase.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameters of the command we originally sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
				hdev->discovery.state == DISCOVERY_FINDING) {
			/* LE phase done; let mgmt start the BR/EDR phase */
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1134
1135static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1136{
1137 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1138
1139 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1140
1141 if (rp->status)
1142 return;
1143
1144 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1145}
1146
1147static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1148{
1149 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1150
1151 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1152
1153 if (rp->status)
1154 return;
1155
1156 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1157}
1158
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE bit we sent into host_features[0].  The
 * mgmt layer is notified outside of the HCI_INIT phase, and the
 * pending request is completed in all cases.
 */
static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameters of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
			!test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
1184
/* Command Status handler for HCI Inquiry.
 * On failure: complete the request, let any pending connection attempts
 * proceed, and report the failed discovery to mgmt. On success: mark the
 * inquiry in progress and move discovery into the FINDING state (the
 * final result arrives later via Inquiry Complete).
 */
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1205
/* Command Status handler for HCI Create Connection.
 * On failure the outgoing ACL connection object is either torn down or,
 * for a retryable error, parked in BT_CONNECT2 for another attempt.
 * On success, ensure a connection object exists for the peer so the
 * later Connection Complete event has something to attach to.
 */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c is HCI "Command Disallowed"; retry up to two
			 * attempts before giving up on the connection. */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* No pre-existing object (connection initiated
			 * outside the stack's normal path): create one. */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1245
1246static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1247{
1248 struct hci_cp_add_sco *cp;
1249 struct hci_conn *acl, *sco;
1250 __u16 handle;
1251
1252 BT_DBG("%s status 0x%x", hdev->name, status);
1253
1254 if (!status)
1255 return;
1256
1257 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1258 if (!cp)
1259 return;
1260
1261 handle = __le16_to_cpu(cp->handle);
1262
1263 BT_DBG("%s handle %d", hdev->name, handle);
1264
1265 hci_dev_lock(hdev);
1266
1267 acl = hci_conn_hash_lookup_handle(hdev, handle);
1268 if (acl) {
1269 sco = acl->link;
1270 if (sco) {
1271 sco->state = BT_CLOSED;
1272
1273 hci_proto_connect_cfm(sco, status);
1274 hci_conn_del(sco);
1275 }
1276 }
1277
1278 hci_dev_unlock(hdev);
1279}
1280
1281static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1282{
1283 struct hci_cp_auth_requested *cp;
1284 struct hci_conn *conn;
1285
1286 BT_DBG("%s status 0x%x", hdev->name, status);
1287
1288 if (!status)
1289 return;
1290
1291 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1292 if (!cp)
1293 return;
1294
1295 hci_dev_lock(hdev);
1296
1297 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1298 if (conn) {
1299 if (conn->state == BT_CONFIG) {
1300 hci_proto_connect_cfm(conn, status);
1301 hci_conn_put(conn);
1302 }
1303 }
1304
1305 hci_dev_unlock(hdev);
1306}
1307
1308static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1309{
1310 struct hci_cp_set_conn_encrypt *cp;
1311 struct hci_conn *conn;
1312
1313 BT_DBG("%s status 0x%x", hdev->name, status);
1314
1315 if (!status)
1316 return;
1317
1318 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1319 if (!cp)
1320 return;
1321
1322 hci_dev_lock(hdev);
1323
1324 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1325 if (conn) {
1326 if (conn->state == BT_CONFIG) {
1327 hci_proto_connect_cfm(conn, status);
1328 hci_conn_put(conn);
1329 }
1330 }
1331
1332 hci_dev_unlock(hdev);
1333}
1334
1335static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1336 struct hci_conn *conn)
1337{
1338 if (conn->state != BT_CONFIG || !conn->out)
1339 return 0;
1340
1341 if (conn->pending_sec_level == BT_SECURITY_SDP)
1342 return 0;
1343
1344 /* Only request authentication for SSP connections or non-SSP
1345 * devices with sec_level HIGH or if MITM protection is requested */
1346 if (!hci_conn_ssp_enabled(conn) &&
1347 conn->pending_sec_level != BT_SECURITY_HIGH &&
1348 !(conn->auth_type & 0x01))
1349 return 0;
1350
1351 return 1;
1352}
1353
1354static inline int hci_resolve_name(struct hci_dev *hdev,
1355 struct inquiry_entry *e)
1356{
1357 struct hci_cp_remote_name_req cp;
1358
1359 memset(&cp, 0, sizeof(cp));
1360
1361 bacpy(&cp.bdaddr, &e->data.bdaddr);
1362 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1363 cp.pscan_mode = e->data.pscan_mode;
1364 cp.clock_offset = e->data.clock_offset;
1365
1366 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1367}
1368
1369static bool hci_resolve_next_name(struct hci_dev *hdev)
1370{
1371 struct discovery_state *discov = &hdev->discovery;
1372 struct inquiry_entry *e;
1373
1374 if (list_empty(&discov->resolve))
1375 return false;
1376
1377 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1378 if (!e)
1379 return false;
1380
1381 if (hci_resolve_name(hdev, e) == 0) {
1382 e->name_state = NAME_PENDING;
1383 return true;
1384 }
1385
1386 return false;
1387}
1388
/* Handle a completed (or failed) remote-name lookup during discovery:
 * report the connected device to mgmt, update the inquiry cache entry's
 * name state, and either continue resolving the next name or finish
 * the discovery session. `name` is NULL when resolution failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First connection-level notification for this link, if any. */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
					name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested: short-circuit straight to completion. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: stay in DISCOVERY_RESOLVING. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1431
1432static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1433{
1434 struct hci_cp_remote_name_req *cp;
1435 struct hci_conn *conn;
1436
1437 BT_DBG("%s status 0x%x", hdev->name, status);
1438
1439 /* If successful wait for the name req complete event before
1440 * checking for the need to do authentication */
1441 if (!status)
1442 return;
1443
1444 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1445 if (!cp)
1446 return;
1447
1448 hci_dev_lock(hdev);
1449
1450 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1451
1452 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1453 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1454
1455 if (!conn)
1456 goto unlock;
1457
1458 if (!hci_outgoing_auth_needed(hdev, conn))
1459 goto unlock;
1460
1461 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1462 struct hci_cp_auth_requested cp;
1463 cp.handle = __cpu_to_le16(conn->handle);
1464 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1465 }
1466
1467unlock:
1468 hci_dev_unlock(hdev);
1469}
1470
1471static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1472{
1473 struct hci_cp_read_remote_features *cp;
1474 struct hci_conn *conn;
1475
1476 BT_DBG("%s status 0x%x", hdev->name, status);
1477
1478 if (!status)
1479 return;
1480
1481 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1482 if (!cp)
1483 return;
1484
1485 hci_dev_lock(hdev);
1486
1487 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1488 if (conn) {
1489 if (conn->state == BT_CONFIG) {
1490 hci_proto_connect_cfm(conn, status);
1491 hci_conn_put(conn);
1492 }
1493 }
1494
1495 hci_dev_unlock(hdev);
1496}
1497
1498static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1499{
1500 struct hci_cp_read_remote_ext_features *cp;
1501 struct hci_conn *conn;
1502
1503 BT_DBG("%s status 0x%x", hdev->name, status);
1504
1505 if (!status)
1506 return;
1507
1508 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1509 if (!cp)
1510 return;
1511
1512 hci_dev_lock(hdev);
1513
1514 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1515 if (conn) {
1516 if (conn->state == BT_CONFIG) {
1517 hci_proto_connect_cfm(conn, status);
1518 hci_conn_put(conn);
1519 }
1520 }
1521
1522 hci_dev_unlock(hdev);
1523}
1524
1525static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1526{
1527 struct hci_cp_setup_sync_conn *cp;
1528 struct hci_conn *acl, *sco;
1529 __u16 handle;
1530
1531 BT_DBG("%s status 0x%x", hdev->name, status);
1532
1533 if (!status)
1534 return;
1535
1536 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1537 if (!cp)
1538 return;
1539
1540 handle = __le16_to_cpu(cp->handle);
1541
1542 BT_DBG("%s handle %d", hdev->name, handle);
1543
1544 hci_dev_lock(hdev);
1545
1546 acl = hci_conn_hash_lookup_handle(hdev, handle);
1547 if (acl) {
1548 sco = acl->link;
1549 if (sco) {
1550 sco->state = BT_CLOSED;
1551
1552 hci_proto_connect_cfm(sco, status);
1553 hci_conn_del(sco);
1554 }
1555 }
1556
1557 hci_dev_unlock(hdev);
1558}
1559
1560static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1561{
1562 struct hci_cp_sniff_mode *cp;
1563 struct hci_conn *conn;
1564
1565 BT_DBG("%s status 0x%x", hdev->name, status);
1566
1567 if (!status)
1568 return;
1569
1570 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1571 if (!cp)
1572 return;
1573
1574 hci_dev_lock(hdev);
1575
1576 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1577 if (conn) {
1578 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1579
1580 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1581 hci_sco_setup(conn, status);
1582 }
1583
1584 hci_dev_unlock(hdev);
1585}
1586
1587static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1588{
1589 struct hci_cp_exit_sniff_mode *cp;
1590 struct hci_conn *conn;
1591
1592 BT_DBG("%s status 0x%x", hdev->name, status);
1593
1594 if (!status)
1595 return;
1596
1597 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1598 if (!cp)
1599 return;
1600
1601 hci_dev_lock(hdev);
1602
1603 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1604 if (conn) {
1605 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1606
1607 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1608 hci_sco_setup(conn, status);
1609 }
1610
1611 hci_dev_unlock(hdev);
1612}
1613
1614static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1615{
1616 struct hci_cp_disconnect *cp;
1617 struct hci_conn *conn;
1618
1619 if (!status)
1620 return;
1621
1622 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1623 if (!cp)
1624 return;
1625
1626 hci_dev_lock(hdev);
1627
1628 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1629 if (conn)
1630 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1631 conn->dst_type, status);
1632
1633 hci_dev_unlock(hdev);
1634}
1635
/* Command Status handler for HCI LE Create Connection.
 * On failure: tear down the outgoing LE connection object and notify
 * mgmt. On success: make sure a connection object exists for the peer
 * so the LE Connection Complete event has something to attach to.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
					conn->dst_type, status);
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			/* Connection initiated outside the normal path:
			 * create the tracking object now. */
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = true;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}
1676
/* Command Status handler for HCI LE Start Encryption. Debug trace only;
 * the outcome is delivered via the Encryption Change event. */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
1681
/* Inquiry Complete event handler: finish the inquiry request and, if a
 * mgmt-driven discovery is in the FINDING state, move on to resolving
 * names of discovered devices (or stop discovery when none are left).
 */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	/* Connection attempts deferred during inquiry can proceed now. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Name resolution below only applies to mgmt-driven discovery. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first device still needing a name. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1721
1722static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1723{
1724 struct inquiry_data data;
1725 struct inquiry_info *info = (void *) (skb->data + 1);
1726 int num_rsp = *((__u8 *) skb->data);
1727
1728 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1729
1730 if (!num_rsp)
1731 return;
1732
1733 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1734 return;
1735
1736 hci_dev_lock(hdev);
1737
1738 for (; num_rsp; num_rsp--, info++) {
1739 bool name_known, ssp;
1740
1741 bacpy(&data.bdaddr, &info->bdaddr);
1742 data.pscan_rep_mode = info->pscan_rep_mode;
1743 data.pscan_period_mode = info->pscan_period_mode;
1744 data.pscan_mode = info->pscan_mode;
1745 memcpy(data.dev_class, info->dev_class, 3);
1746 data.clock_offset = info->clock_offset;
1747 data.rssi = 0x00;
1748 data.ssp_mode = 0x00;
1749
1750 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1751 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1752 info->dev_class, 0, !name_known, ssp, NULL,
1753 0);
1754 }
1755
1756 hci_dev_unlock(hdev);
1757}
1758
/* Connection Complete event handler: attach the controller's handle to
 * the matching connection object, finish ACL configuration setup (link
 * keys, remote features, packet type) or report the failure, and
 * confirm the connection to the upper protocol layers.
 */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* An eSCO request may get downgraded to SCO by the
		 * controller; retry the lookup under ESCO_LINK. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) link with no stored
			 * key: give pairing extra time before disconnect. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
					!hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
									&cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links are confirmed later, after remote features
		 * and optional authentication are done. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1841
/* Connection Request event handler: accept or reject an incoming
 * connection based on the device's link policy, protocol-layer opinion
 * and the blacklist. Accepted ACL/SCO requests get an Accept Connection
 * Request command; eSCO-capable devices answer sync requests with
 * Accept Synchronous Connection; everything else is rejected.
 */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
			batostr(&ev->bdaddr), ev->link_type);

	/* Let the protocol layers veto or extend the accept decision. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we saw this device
		 * during inquiry. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
									&cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* 64 kbit/s in both directions, no latency or
			 * retransmission constraints. */
			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1915
/* Disconnection Complete event handler: close the connection object,
 * notify mgmt (success or failure), optionally drop the stored link
 * key, and confirm the disconnect to the upper protocol layers.
 */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only links previously reported as connected to mgmt produce a
	 * disconnect notification. */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
						conn->dst_type, ev->status);
		else
			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
							conn->dst_type);
	}

	if (ev->status == 0) {
		/* flush_key marks keys that should not survive the link. */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1952
/* Authentication Complete event handler: record the new security level
 * (or report the failure to mgmt), then drive the next step — start
 * encryption for SSP links still in configuration, otherwise confirm
 * the connection / authentication to the upper layers. Finally, serve
 * any encryption request that was waiting on authentication.
 */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot re-authenticate an
		 * existing link; keep the old security level. */
		if (!hci_conn_ssp_enabled(conn) &&
				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
								ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is
			 * usable: request it now. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* An encryption change was queued behind this authentication. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2018
/* Remote Name Request Complete event handler: feed the resolved (or
 * failed) name into the discovery name-resolution logic and, for an
 * outgoing connection still being configured, continue with an
 * authentication request when one is needed.
 */
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name bookkeeping is only relevant for mgmt-driven discovery. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already in flight. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2057
/* Encryption Change event handler: update the link-mode flags, tear
 * down established links whose encryption change failed, and confirm
 * the result to the upper layers (completing configuration when the
 * link was still in BT_CONFIG).
 */
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* A failed encryption change on an established link is a
		 * security failure: disconnect it. */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2100
2101static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2102{
2103 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2104 struct hci_conn *conn;
2105
2106 BT_DBG("%s status %d", hdev->name, ev->status);
2107
2108 hci_dev_lock(hdev);
2109
2110 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2111 if (conn) {
2112 if (!ev->status)
2113 conn->link_mode |= HCI_LM_SECURE;
2114
2115 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2116
2117 hci_key_change_cfm(conn, ev->status);
2118 }
2119
2120 hci_dev_unlock(hdev);
2121}
2122
/* Remote Features event handler: cache the remote feature page and,
 * for links still being configured, continue setup — fetch extended
 * features for SSP-capable peers, otherwise resolve the remote name or
 * report the connection to mgmt, and finish configuration when no
 * authentication is required.
 */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: read extended features (page 1) before
	 * continuing; the rest of setup resumes in that event handler. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device to mgmt;
	 * announce it directly if that already happened. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
					conn->dst_type, 0, NULL, 0,
					conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2171
/* Remote Version Information event handler: debug trace only; the
 * version data is currently unused. */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2176
/* QoS Setup Complete event handler: debug trace only; QoS results are
 * currently ignored. */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2181
/* Command Complete event handler: dispatch on the completed opcode to
 * the matching hci_cc_* handler (the event parameters follow the
 * header, hence the skb_pull), restart the command timeout, and resume
 * sending queued commands when the controller advertises free command
 * slots (ev->ncmd).
 */
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header so handlers see only the return
	 * parameters of their command. */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* A real command completed: cancel the stall watchdog. */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Controller has free command slots again: resume the queue. */
	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2402
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the HCI Command Status event: dispatch the status byte to
	 * the per-command status handler, then restart the command queue
	 * if the controller indicated it can accept more commands. */
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		/* No dedicated handler for this opcode; just log it */
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP is 0x0000, so comparing the still little-endian
	 * ev->opcode directly is byte-order safe here. A real command
	 * produced a status, so stop the command timeout timer. */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* ev->ncmd is how many commands the controller will accept. Hold
	 * off restarting the queue while HCI_RESET is pending so queued
	 * commands are not sent to a resetting controller. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2483
2484static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2485{
2486 struct hci_ev_role_change *ev = (void *) skb->data;
2487 struct hci_conn *conn;
2488
2489 BT_DBG("%s status %d", hdev->name, ev->status);
2490
2491 hci_dev_lock(hdev);
2492
2493 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2494 if (conn) {
2495 if (!ev->status) {
2496 if (ev->role)
2497 conn->link_mode &= ~HCI_LM_MASTER;
2498 else
2499 conn->link_mode |= HCI_LM_MASTER;
2500 }
2501
2502 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2503
2504 hci_role_switch_cfm(conn, ev->status, ev->role);
2505 }
2506
2507 hci_dev_unlock(hdev);
2508}
2509
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Number Of Completed Packets event: credit back the per-link-type
	 * transmit counters and kick TX work so queued traffic can flow.
	 * Only valid when packet-based flow control is in use. */
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The first clause proves *ev fits in the skb before the second
	 * clause reads ev->num_hndl (|| short-circuits). */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			/* Clamp to the controller-advertised pool size */
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL pool instead. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2575
static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	/* Number Of Completed Data Blocks event: block-based counterpart
	 * of hci_num_comp_pkts_evt. Returns transmit credits in units of
	 * data blocks and kicks the TX work queue. */
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* First clause proves *ev fits in the skb before the second
	 * clause reads ev->num_hndl (|| short-circuits). */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
								ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
			/* Clamp to the controller-advertised block pool */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2625
2626static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2627{
2628 struct hci_ev_mode_change *ev = (void *) skb->data;
2629 struct hci_conn *conn;
2630
2631 BT_DBG("%s status %d", hdev->name, ev->status);
2632
2633 hci_dev_lock(hdev);
2634
2635 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2636 if (conn) {
2637 conn->mode = ev->mode;
2638 conn->interval = __le16_to_cpu(ev->interval);
2639
2640 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2641 if (conn->mode == HCI_CM_ACTIVE)
2642 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2643 else
2644 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2645 }
2646
2647 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2648 hci_sco_setup(conn, ev->status);
2649 }
2650
2651 hci_dev_unlock(hdev);
2652}
2653
static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the PIN Code Request event: extend the disconnect timeout
	 * while pairing, auto-reject when the adapter is not pairable, or
	 * forward the request to the management interface. */
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* The hold/put pair refreshes the disconnect timer with the
	 * longer pairing timeout (hci_conn_put re-arms it). */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Tell userspace whether a 16 digit PIN is required */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2690
2691static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2692{
2693 struct hci_ev_link_key_req *ev = (void *) skb->data;
2694 struct hci_cp_link_key_reply cp;
2695 struct hci_conn *conn;
2696 struct link_key *key;
2697
2698 BT_DBG("%s", hdev->name);
2699
2700 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2701 return;
2702
2703 hci_dev_lock(hdev);
2704
2705 key = hci_find_link_key(hdev, &ev->bdaddr);
2706 if (!key) {
2707 BT_DBG("%s link key not found for %s", hdev->name,
2708 batostr(&ev->bdaddr));
2709 goto not_found;
2710 }
2711
2712 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2713 batostr(&ev->bdaddr));
2714
2715 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2716 key->type == HCI_LK_DEBUG_COMBINATION) {
2717 BT_DBG("%s ignoring debug key", hdev->name);
2718 goto not_found;
2719 }
2720
2721 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2722 if (conn) {
2723 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2724 conn->auth_type != 0xff &&
2725 (conn->auth_type & 0x01)) {
2726 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2727 goto not_found;
2728 }
2729
2730 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2731 conn->pending_sec_level == BT_SECURITY_HIGH) {
2732 BT_DBG("%s ignoring key unauthenticated for high \
2733 security", hdev->name);
2734 goto not_found;
2735 }
2736
2737 conn->key_type = key->type;
2738 conn->pin_length = key->pin_len;
2739 }
2740
2741 bacpy(&cp.bdaddr, &ev->bdaddr);
2742 memcpy(cp.link_key, key->val, 16);
2743
2744 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2745
2746 hci_dev_unlock(hdev);
2747
2748 return;
2749
2750not_found:
2751 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2752 hci_dev_unlock(hdev);
2753}
2754
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the Link Key Notification event: record the new key type
	 * on the connection and store the key if key storage is enabled. */
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* hold/put re-arms the disconnect timer with the normal
		 * (post-pairing) timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* NOTE(review): conn may be NULL here — hci_add_link_key appears
	 * to be expected to tolerate that; confirm against its definition. */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
							ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2783
2784static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2785{
2786 struct hci_ev_clock_offset *ev = (void *) skb->data;
2787 struct hci_conn *conn;
2788
2789 BT_DBG("%s status %d", hdev->name, ev->status);
2790
2791 hci_dev_lock(hdev);
2792
2793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2794 if (conn && !ev->status) {
2795 struct inquiry_entry *ie;
2796
2797 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2798 if (ie) {
2799 ie->data.clock_offset = ev->clock_offset;
2800 ie->timestamp = jiffies;
2801 }
2802 }
2803
2804 hci_dev_unlock(hdev);
2805}
2806
2807static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{
2809 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2810 struct hci_conn *conn;
2811
2812 BT_DBG("%s status %d", hdev->name, ev->status);
2813
2814 hci_dev_lock(hdev);
2815
2816 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2817 if (conn && !ev->status)
2818 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2819
2820 hci_dev_unlock(hdev);
2821}
2822
2823static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2824{
2825 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2826 struct inquiry_entry *ie;
2827
2828 BT_DBG("%s", hdev->name);
2829
2830 hci_dev_lock(hdev);
2831
2832 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2833 if (ie) {
2834 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2835 ie->timestamp = jiffies;
2836 }
2837
2838 hci_dev_unlock(hdev);
2839}
2840
2841static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2842{
2843 struct inquiry_data data;
2844 int num_rsp = *((__u8 *) skb->data);
2845 bool name_known, ssp;
2846
2847 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2848
2849 if (!num_rsp)
2850 return;
2851
2852 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2853 return;
2854
2855 hci_dev_lock(hdev);
2856
2857 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2858 struct inquiry_info_with_rssi_and_pscan_mode *info;
2859 info = (void *) (skb->data + 1);
2860
2861 for (; num_rsp; num_rsp--, info++) {
2862 bacpy(&data.bdaddr, &info->bdaddr);
2863 data.pscan_rep_mode = info->pscan_rep_mode;
2864 data.pscan_period_mode = info->pscan_period_mode;
2865 data.pscan_mode = info->pscan_mode;
2866 memcpy(data.dev_class, info->dev_class, 3);
2867 data.clock_offset = info->clock_offset;
2868 data.rssi = info->rssi;
2869 data.ssp_mode = 0x00;
2870
2871 name_known = hci_inquiry_cache_update(hdev, &data,
2872 false, &ssp);
2873 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2874 info->dev_class, info->rssi,
2875 !name_known, ssp, NULL, 0);
2876 }
2877 } else {
2878 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2879
2880 for (; num_rsp; num_rsp--, info++) {
2881 bacpy(&data.bdaddr, &info->bdaddr);
2882 data.pscan_rep_mode = info->pscan_rep_mode;
2883 data.pscan_period_mode = info->pscan_period_mode;
2884 data.pscan_mode = 0x00;
2885 memcpy(data.dev_class, info->dev_class, 3);
2886 data.clock_offset = info->clock_offset;
2887 data.rssi = info->rssi;
2888 data.ssp_mode = 0x00;
2889 name_known = hci_inquiry_cache_update(hdev, &data,
2890 false, &ssp);
2891 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2892 info->dev_class, info->rssi,
2893 !name_known, ssp, NULL, 0);
2894 }
2895 }
2896
2897 hci_dev_unlock(hdev);
2898}
2899
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the Read Remote Extended Features Complete event: record
	 * remote SSP host support (feature page 1), then continue the
	 * connection setup sequence (remote name request, mgmt connected
	 * notification, and the connect confirmation when no outgoing
	 * authentication is needed). */
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 0x01 carries the host features, including SSP support */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Not yet announced to mgmt: resolve the remote name first;
	 * otherwise announce the connection now. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2947
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the Synchronous Connection Complete event for SCO/eSCO
	 * links: finish successful setups, retry eSCO as SCO on known
	 * failure codes, and tear down the connection otherwise. */
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may have been downgraded to SCO by the
		 * controller; fall back to the pending eSCO connection. */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once with eSCO packet types masked out */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3002
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Synchronous Connection Changed event is intentionally ignored;
	 * only logged for debugging. */
	BT_DBG("%s", hdev->name);
}
3007
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Sniff Subrating event carries no state we track; only the
	 * status is logged for debugging. */
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
3014
3015static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
3016{
3017 struct inquiry_data data;
3018 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3019 int num_rsp = *((__u8 *) skb->data);
3020 size_t eir_len;
3021
3022 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3023
3024 if (!num_rsp)
3025 return;
3026
3027 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3028 return;
3029
3030 hci_dev_lock(hdev);
3031
3032 for (; num_rsp; num_rsp--, info++) {
3033 bool name_known, ssp;
3034
3035 bacpy(&data.bdaddr, &info->bdaddr);
3036 data.pscan_rep_mode = info->pscan_rep_mode;
3037 data.pscan_period_mode = info->pscan_period_mode;
3038 data.pscan_mode = 0x00;
3039 memcpy(data.dev_class, info->dev_class, 3);
3040 data.clock_offset = info->clock_offset;
3041 data.rssi = info->rssi;
3042 data.ssp_mode = 0x01;
3043
3044 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3045 name_known = eir_has_data_type(info->data,
3046 sizeof(info->data),
3047 EIR_NAME_COMPLETE);
3048 else
3049 name_known = true;
3050
3051 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3052 &ssp);
3053 eir_len = eir_get_length(info->data, sizeof(info->data));
3054 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3055 info->dev_class, info->rssi, !name_known,
3056 ssp, info->data, eir_len);
3057 }
3058
3059 hci_dev_unlock(hdev);
3060}
3061
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	/* Handle the Encryption Key Refresh Complete event: promote the
	 * pending security level on success, disconnect on refresh
	 * failure, and deliver the appropriate connect/auth confirmation
	 * depending on the connection state. */
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %u handle %u", hdev->name, ev->status,
				__le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live link is treated as an auth failure */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold/put re-arms the disconnect timer */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3105
3106static inline u8 hci_get_auth_req(struct hci_conn *conn)
3107{
3108 /* If remote requests dedicated bonding follow that lead */
3109 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3110 /* If both remote and local IO capabilities allow MITM
3111 * protection then require it, otherwise don't */
3112 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3113 return 0x02;
3114 else
3115 return 0x03;
3116 }
3117
3118 /* If remote requests no-bonding follow that lead */
3119 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3120 return conn->remote_auth | (conn->auth_type & 0x01);
3121
3122 return conn->auth_type;
3123}
3124
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the IO Capability Request event: reply with our IO
	 * capability, authentication requirement and OOB availability, or
	 * reject the pairing when the adapter is not pairable and the
	 * remote wants bonding. */
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Held for the duration of the pairing procedure; released by a
	 * later event handler — confirm against the pairing teardown path. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when pairable, or when the remote only asked for
	 * no-bonding (with or without MITM). */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we actually have some for
		 * this peer */
		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3176
3177static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3178{
3179 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3180 struct hci_conn *conn;
3181
3182 BT_DBG("%s", hdev->name);
3183
3184 hci_dev_lock(hdev);
3185
3186 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3187 if (!conn)
3188 goto unlock;
3189
3190 conn->remote_cap = ev->capability;
3191 conn->remote_auth = ev->authentication;
3192 if (ev->oob_data)
3193 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3194
3195unlock:
3196 hci_dev_unlock(hdev);
3197}
3198
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	/* Handle the User Confirmation Request event: decide between
	 * rejecting, auto-accepting (immediately or after a configurable
	 * delay), or forwarding the numeric comparison to userspace. */
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		/* Defer the accept so the user has a chance to abort */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
						sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
								confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3266
static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	/* Handle the User Passkey Request event: forward it to userspace
	 * via the management interface; without mgmt it is ignored. */
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);

	hci_dev_unlock(hdev);
}
3281
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the Simple Pairing Complete event: report pairing
	 * failures to userspace when no Auth Complete event will do so,
	 * and drop the reference taken for the pairing procedure. */
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3309
3310static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3311{
3312 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3313 struct inquiry_entry *ie;
3314
3315 BT_DBG("%s", hdev->name);
3316
3317 hci_dev_lock(hdev);
3318
3319 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3320 if (ie)
3321 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3322
3323 hci_dev_unlock(hdev);
3324}
3325
3326static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3327 struct sk_buff *skb)
3328{
3329 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3330 struct oob_data *data;
3331
3332 BT_DBG("%s", hdev->name);
3333
3334 hci_dev_lock(hdev);
3335
3336 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3337 goto unlock;
3338
3339 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3340 if (data) {
3341 struct hci_cp_remote_oob_data_reply cp;
3342
3343 bacpy(&cp.bdaddr, &ev->bdaddr);
3344 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3345 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3346
3347 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3348 &cp);
3349 } else {
3350 struct hci_cp_remote_oob_data_neg_reply cp;
3351
3352 bacpy(&cp.bdaddr, &ev->bdaddr);
3353 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3354 &cp);
3355 }
3356
3357unlock:
3358 hci_dev_unlock(hdev);
3359}
3360
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the LE Connection Complete subevent: create or complete
	 * the LE connection object, notify the management interface and
	 * confirm the connection to the upper protocols. */
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* Incoming connections have no pre-existing hci_conn; allocate
	 * one on the fly. */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3407
3408static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3409 struct sk_buff *skb)
3410{
3411 u8 num_reports = skb->data[0];
3412 void *ptr = &skb->data[1];
3413 s8 rssi;
3414
3415 hci_dev_lock(hdev);
3416
3417 while (num_reports--) {
3418 struct hci_ev_le_advertising_info *ev = ptr;
3419
3420 rssi = ev->data[ev->length];
3421 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3422 NULL, rssi, 0, 1, ev->data, ev->length);
3423
3424 ptr += sizeof(*ev) + ev->length + 1;
3425 }
3426
3427 hci_dev_unlock(hdev);
3428}
3429
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	/* Handle the LE Long Term Key Request subevent: look up a stored
	 * LTK matching the ediv/random pair and hand it back to the
	 * controller, or send a negative reply if none is found. */
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* STK entries are removed once handed back to the controller */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3473
static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Handle the LE Meta event: strip the meta header and dispatch on
	 * the subevent code. Unknown subevents are silently ignored. */
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}
3497
3498void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3499{
3500 struct hci_event_hdr *hdr = (void *) skb->data;
3501 __u8 event = hdr->evt;
3502
3503 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3504
3505 switch (event) {
3506 case HCI_EV_INQUIRY_COMPLETE:
3507 hci_inquiry_complete_evt(hdev, skb);
3508 break;
3509
3510 case HCI_EV_INQUIRY_RESULT:
3511 hci_inquiry_result_evt(hdev, skb);
3512 break;
3513
3514 case HCI_EV_CONN_COMPLETE:
3515 hci_conn_complete_evt(hdev, skb);
3516 break;
3517
3518 case HCI_EV_CONN_REQUEST:
3519 hci_conn_request_evt(hdev, skb);
3520 break;
3521
3522 case HCI_EV_DISCONN_COMPLETE:
3523 hci_disconn_complete_evt(hdev, skb);
3524 break;
3525
3526 case HCI_EV_AUTH_COMPLETE:
3527 hci_auth_complete_evt(hdev, skb);
3528 break;
3529
3530 case HCI_EV_REMOTE_NAME:
3531 hci_remote_name_evt(hdev, skb);
3532 break;
3533
3534 case HCI_EV_ENCRYPT_CHANGE:
3535 hci_encrypt_change_evt(hdev, skb);
3536 break;
3537
3538 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3539 hci_change_link_key_complete_evt(hdev, skb);
3540 break;
3541
3542 case HCI_EV_REMOTE_FEATURES:
3543 hci_remote_features_evt(hdev, skb);
3544 break;
3545
3546 case HCI_EV_REMOTE_VERSION:
3547 hci_remote_version_evt(hdev, skb);
3548 break;
3549
3550 case HCI_EV_QOS_SETUP_COMPLETE:
3551 hci_qos_setup_complete_evt(hdev, skb);
3552 break;
3553
3554 case HCI_EV_CMD_COMPLETE:
3555 hci_cmd_complete_evt(hdev, skb);
3556 break;
3557
3558 case HCI_EV_CMD_STATUS:
3559 hci_cmd_status_evt(hdev, skb);
3560 break;
3561
3562 case HCI_EV_ROLE_CHANGE:
3563 hci_role_change_evt(hdev, skb);
3564 break;
3565
3566 case HCI_EV_NUM_COMP_PKTS:
3567 hci_num_comp_pkts_evt(hdev, skb);
3568 break;
3569
3570 case HCI_EV_MODE_CHANGE:
3571 hci_mode_change_evt(hdev, skb);
3572 break;
3573
3574 case HCI_EV_PIN_CODE_REQ:
3575 hci_pin_code_request_evt(hdev, skb);
3576 break;
3577
3578 case HCI_EV_LINK_KEY_REQ:
3579 hci_link_key_request_evt(hdev, skb);
3580 break;
3581
3582 case HCI_EV_LINK_KEY_NOTIFY:
3583 hci_link_key_notify_evt(hdev, skb);
3584 break;
3585
3586 case HCI_EV_CLOCK_OFFSET:
3587 hci_clock_offset_evt(hdev, skb);
3588 break;
3589
3590 case HCI_EV_PKT_TYPE_CHANGE:
3591 hci_pkt_type_change_evt(hdev, skb);
3592 break;
3593
3594 case HCI_EV_PSCAN_REP_MODE:
3595 hci_pscan_rep_mode_evt(hdev, skb);
3596 break;
3597
3598 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3599 hci_inquiry_result_with_rssi_evt(hdev, skb);
3600 break;
3601
3602 case HCI_EV_REMOTE_EXT_FEATURES:
3603 hci_remote_ext_features_evt(hdev, skb);
3604 break;
3605
3606 case HCI_EV_SYNC_CONN_COMPLETE:
3607 hci_sync_conn_complete_evt(hdev, skb);
3608 break;
3609
3610 case HCI_EV_SYNC_CONN_CHANGED:
3611 hci_sync_conn_changed_evt(hdev, skb);
3612 break;
3613
3614 case HCI_EV_SNIFF_SUBRATE:
3615 hci_sniff_subrate_evt(hdev, skb);
3616 break;
3617
3618 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3619 hci_extended_inquiry_result_evt(hdev, skb);
3620 break;
3621
3622 case HCI_EV_KEY_REFRESH_COMPLETE:
3623 hci_key_refresh_complete_evt(hdev, skb);
3624 break;
3625
3626 case HCI_EV_IO_CAPA_REQUEST:
3627 hci_io_capa_request_evt(hdev, skb);
3628 break;
3629
3630 case HCI_EV_IO_CAPA_REPLY:
3631 hci_io_capa_reply_evt(hdev, skb);
3632 break;
3633
3634 case HCI_EV_USER_CONFIRM_REQUEST:
3635 hci_user_confirm_request_evt(hdev, skb);
3636 break;
3637
3638 case HCI_EV_USER_PASSKEY_REQUEST:
3639 hci_user_passkey_request_evt(hdev, skb);
3640 break;
3641
3642 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3643 hci_simple_pair_complete_evt(hdev, skb);
3644 break;
3645
3646 case HCI_EV_REMOTE_HOST_FEATURES:
3647 hci_remote_host_features_evt(hdev, skb);
3648 break;
3649
3650 case HCI_EV_LE_META:
3651 hci_le_meta_evt(hdev, skb);
3652 break;
3653
3654 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3655 hci_remote_oob_data_request_evt(hdev, skb);
3656 break;
3657
3658 case HCI_EV_NUM_COMP_BLOCKS:
3659 hci_num_comp_blocks_evt(hdev, skb);
3660 break;
3661
3662 default:
3663 BT_DBG("%s event 0x%x", hdev->name, event);
3664 break;
3665 }
3666
3667 kfree_skb(skb);
3668 hdev->stat.evt_rx++;
3669}