1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/idr.h>
30#include <linux/rfkill.h>
31#include <linux/debugfs.h>
32#include <linux/crypto.h>
33#include <asm/unaligned.h>
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37#include <net/bluetooth/l2cap.h>
38#include <net/bluetooth/mgmt.h>
39
40#include "hci_request.h"
41#include "hci_debugfs.h"
42#include "smp.h"
43#include "leds.h"
44
45static void hci_rx_work(struct work_struct *work);
46static void hci_cmd_work(struct work_struct *work);
47static void hci_tx_work(struct work_struct *work);
48
49/* HCI device list */
50LIST_HEAD(hci_dev_list);
51DEFINE_RWLOCK(hci_dev_list_lock);
52
53/* HCI callback list */
54LIST_HEAD(hci_cb_list);
55DEFINE_MUTEX(hci_cb_list_lock);
56
57/* HCI ID Numbering */
58static DEFINE_IDA(hci_index_ida);
59
60/* ---- HCI debugfs entries ---- */
61
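/* debugfs "dut_mode" attribute: reads report whether Device Under Test
 * mode is active, and a boolean write enables it with the Enable Device
 * Under Test Mode command or leaves it again via HCI Reset.
 */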
62static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
74static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76{
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
79 char buf[32];
80 size_t buf_size = min(count, (sizeof(buf)-1));
81 bool enable;
82
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
85
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
88
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
92
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94 return -EALREADY;
95
96 hci_req_sync_lock(hdev);
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_sync_unlock(hdev);
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
108 kfree_skb(skb);
109
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
111
112 return count;
113}
114
115static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120};
121
122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
134static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136{
137 struct hci_dev *hdev = file->private_data;
138 char buf[32];
139 size_t buf_size = min(count, (sizeof(buf)-1));
140 bool enable;
141 int err;
142
143 if (copy_from_user(buf, user_buf, buf_size))
144 return -EFAULT;
145
146 buf[buf_size] = '\0';
147 if (strtobool(buf, &enable))
148 return -EINVAL;
149
150 /* When the diagnostic flags are not persistent and the transport
151 * is not active, then there is no need for the vendor callback.
152 *
153 * Instead just store the desired value. If needed the setting
154 * will be programmed when the controller gets powered on.
155 */
156 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
157 !test_bit(HCI_RUNNING, &hdev->flags))
158 goto done;
159
160 hci_req_sync_lock(hdev);
161 err = hdev->set_diag(hdev, enable);
162 hci_req_sync_unlock(hdev);
163
164 if (err < 0)
165 return err;
166
167done:
168 if (enable)
169 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
170 else
171 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
172
173 return count;
174}
175
176static const struct file_operations vendor_diag_fops = {
177 .open = simple_open,
178 .read = vendor_diag_read,
179 .write = vendor_diag_write,
180 .llseek = default_llseek,
181};
182
183static void hci_debugfs_create_basic(struct hci_dev *hdev)
184{
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186 &dut_mode_fops);
187
188 if (hdev->set_diag)
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190 &vendor_diag_fops);
191}
192
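/* Queue an HCI Reset and mark the reset as in progress via HCI_RESET. */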
193static int hci_reset_req(struct hci_request *req, unsigned long opt)
194{
195 BT_DBG("%s %ld", req->hdev->name, opt);
196
197 /* Reset device */
198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
200 return 0;
201}
202
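/* First-stage init for BR/EDR capable controllers: packet-based flow
 * control and the basic identity commands (features, version, address).
 */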
203static void bredr_init(struct hci_request *req)
204{
205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
206
207 /* Read Local Supported Features */
208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210 /* Read Local Version */
211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213 /* Read BD Address */
214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
215}
216
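/* First-stage init for AMP controllers: block-based flow control and the
 * AMP specific information commands instead of a BD address read.
 */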
217static void amp_init1(struct hci_request *req)
218{
219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
220
221 /* Read Local Version */
222 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224 /* Read Local Supported Commands */
225 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
226
227 /* Read Local AMP Info */
228 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
229
230 /* Read Data Blk size */
231 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
232
233 /* Read Flow Control Mode */
234 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
235
236 /* Read Location Data */
237 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
238}
239
240static int amp_init2(struct hci_request *req)
241{
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
244 * stage init.
245 */
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
248
249 return 0;
250}
251
252static int hci_init1_req(struct hci_request *req, unsigned long opt)
253{
254 struct hci_dev *hdev = req->hdev;
255
256 BT_DBG("%s %ld", hdev->name, opt);
257
258 /* Reset */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260 hci_reset_req(req, 0);
261
262 switch (hdev->dev_type) {
263 case HCI_BREDR:
264 bredr_init(req);
265 break;
266
267 case HCI_AMP:
268 amp_init1(req);
269 break;
270
271 default:
272 BT_ERR("Unknown device type %d", hdev->dev_type);
273 break;
274 }
275
276 return 0;
277}
278
279static void bredr_setup(struct hci_request *req)
280{
281 __le16 param;
282 __u8 flt_type;
283
284 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
285 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
286
287 /* Read Class of Device */
288 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
289
290 /* Read Local Name */
291 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
292
293 /* Read Voice Setting */
294 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
295
296 /* Read Number of Supported IAC */
297 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
298
299 /* Read Current IAC LAP */
300 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
301
302 /* Clear Event Filters */
303 flt_type = HCI_FLT_CLEAR_ALL;
304 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
305
306 /* Connection accept timeout ~20 secs */
307 param = cpu_to_le16(0x7d00);
308 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
309}
310
311static void le_setup(struct hci_request *req)
312{
313 struct hci_dev *hdev = req->hdev;
314
315 /* Read LE Buffer Size */
316 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
317
318 /* Read LE Local Supported Features */
319 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
320
321 /* Read LE Supported States */
322 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
323
324 /* LE-only controllers have LE implicitly enabled */
325 if (!lmp_bredr_capable(hdev))
326 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
327}
328
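/* Build the main event mask from the controller's supported features and
 * commands so that only events the host can handle are enabled.
 */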
329static void hci_setup_event_mask(struct hci_request *req)
330{
331 struct hci_dev *hdev = req->hdev;
332
333 /* The second byte is 0xff instead of 0x9f (two reserved bits
334 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
335 * command otherwise.
336 */
337 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
338
339 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
340 * any event mask for pre-1.2 devices.
341 */
342 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
343 return;
344
345 if (lmp_bredr_capable(hdev)) {
346 events[4] |= 0x01; /* Flow Specification Complete */
347 } else {
348 /* Use a different default for LE-only devices */
349 memset(events, 0, sizeof(events));
350 events[1] |= 0x20; /* Command Complete */
351 events[1] |= 0x40; /* Command Status */
352 events[1] |= 0x80; /* Hardware Error */
353
354 /* If the controller supports the Disconnect command, enable
355 * the corresponding event. In addition enable packet flow
356 * control related events.
357 */
358 if (hdev->commands[0] & 0x20) {
359 events[0] |= 0x10; /* Disconnection Complete */
360 events[2] |= 0x04; /* Number of Completed Packets */
361 events[3] |= 0x02; /* Data Buffer Overflow */
362 }
363
364 /* If the controller supports the Read Remote Version
365 * Information command, enable the corresponding event.
366 */
367 if (hdev->commands[2] & 0x80)
368 events[1] |= 0x08; /* Read Remote Version Information
369 * Complete
370 */
371
372 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
373 events[0] |= 0x80; /* Encryption Change */
374 events[5] |= 0x80; /* Encryption Key Refresh Complete */
375 }
376 }
377
378 if (lmp_inq_rssi_capable(hdev) ||
379 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
380 events[4] |= 0x02; /* Inquiry Result with RSSI */
381
382 if (lmp_ext_feat_capable(hdev))
383 events[4] |= 0x04; /* Read Remote Extended Features Complete */
384
385 if (lmp_esco_capable(hdev)) {
386 events[5] |= 0x08; /* Synchronous Connection Complete */
387 events[5] |= 0x10; /* Synchronous Connection Changed */
388 }
389
390 if (lmp_sniffsubr_capable(hdev))
391 events[5] |= 0x20; /* Sniff Subrating */
392
393 if (lmp_pause_enc_capable(hdev))
394 events[5] |= 0x80; /* Encryption Key Refresh Complete */
395
396 if (lmp_ext_inq_capable(hdev))
397 events[5] |= 0x40; /* Extended Inquiry Result */
398
399 if (lmp_no_flush_capable(hdev))
400 events[7] |= 0x01; /* Enhanced Flush Complete */
401
402 if (lmp_lsto_capable(hdev))
403 events[6] |= 0x80; /* Link Supervision Timeout Changed */
404
405 if (lmp_ssp_capable(hdev)) {
406 events[6] |= 0x01; /* IO Capability Request */
407 events[6] |= 0x02; /* IO Capability Response */
408 events[6] |= 0x04; /* User Confirmation Request */
409 events[6] |= 0x08; /* User Passkey Request */
410 events[6] |= 0x10; /* Remote OOB Data Request */
411 events[6] |= 0x20; /* Simple Pairing Complete */
412 events[7] |= 0x04; /* User Passkey Notification */
413 events[7] |= 0x08; /* Keypress Notification */
414 events[7] |= 0x10; /* Remote Host Supported
415 * Features Notification
416 */
417 }
418
419 if (lmp_le_capable(hdev))
420 events[7] |= 0x20; /* LE Meta-Event */
421
422 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
423}
424
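/* Second-stage init: transport specific setup for BR/EDR and/or LE,
 * reading the supported commands bitmask and configuring SSP/EIR,
 * inquiry mode and authentication.
 */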
425static int hci_init2_req(struct hci_request *req, unsigned long opt)
426{
427 struct hci_dev *hdev = req->hdev;
428
429 if (hdev->dev_type == HCI_AMP)
430 return amp_init2(req);
431
432 if (lmp_bredr_capable(hdev))
433 bredr_setup(req);
434 else
435 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
436
437 if (lmp_le_capable(hdev))
438 le_setup(req);
439
440 /* All Bluetooth 1.2 and later controllers should support the
441 * HCI command for reading the local supported commands.
442 *
443 * Unfortunately some controllers indicate Bluetooth 1.2 support,
444 * but do not have support for this command. If that is the case,
445 * the driver can quirk the behavior and skip reading the local
446 * supported commands.
447 */
448 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
449 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
450 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
451
452 if (lmp_ssp_capable(hdev)) {
453 /* When SSP is available, the host features page should
454 * also be available. However, some controllers report
455 * max_page as 0 as long as SSP has not been enabled. To
456 * get proper debugging output, force max_page to a
457 * minimum of 1.
458 */
459 hdev->max_page = 0x01;
460
461 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
462 u8 mode = 0x01;
463
464 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
465 sizeof(mode), &mode);
466 } else {
467 struct hci_cp_write_eir cp;
468
469 memset(hdev->eir, 0, sizeof(hdev->eir));
470 memset(&cp, 0, sizeof(cp));
471
472 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
473 }
474 }
475
476 if (lmp_inq_rssi_capable(hdev) ||
477 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
478 u8 mode;
479
480 /* If Extended Inquiry Result events are supported, then
481 * they are clearly preferred over Inquiry Result with RSSI
482 * events.
483 */
484 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
485
486 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 }
488
489 if (lmp_inq_tx_pwr_capable(hdev))
490 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
491
492 if (lmp_ext_feat_capable(hdev)) {
493 struct hci_cp_read_local_ext_features cp;
494
495 cp.page = 0x01;
496 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
497 sizeof(cp), &cp);
498 }
499
500 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
501 u8 enable = 1;
502 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
503 &enable);
504 }
505
506 return 0;
507}
508
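/* Program the default link policy based on the role switch, hold, sniff
 * and park features the controller advertises.
 */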
509static void hci_setup_link_policy(struct hci_request *req)
510{
511 struct hci_dev *hdev = req->hdev;
512 struct hci_cp_write_def_link_policy cp;
513 u16 link_policy = 0;
514
515 if (lmp_rswitch_capable(hdev))
516 link_policy |= HCI_LP_RSWITCH;
517 if (lmp_hold_capable(hdev))
518 link_policy |= HCI_LP_HOLD;
519 if (lmp_sniff_capable(hdev))
520 link_policy |= HCI_LP_SNIFF;
521 if (lmp_park_capable(hdev))
522 link_policy |= HCI_LP_PARK;
523
524 cp.policy = cpu_to_le16(link_policy);
525 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
526}
527
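/* Write LE Host Support on dual-mode controllers whenever the desired
 * host setting differs from what the controller currently reports.
 */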
528static void hci_set_le_support(struct hci_request *req)
529{
530 struct hci_dev *hdev = req->hdev;
531 struct hci_cp_write_le_host_supported cp;
532
533 /* LE-only devices do not support explicit enablement */
534 if (!lmp_bredr_capable(hdev))
535 return;
536
537 memset(&cp, 0, sizeof(cp));
538
539 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
540 cp.le = 0x01;
541 cp.simul = 0x00;
542 }
543
544 if (cp.le != lmp_host_le_capable(hdev))
545 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
546 &cp);
547}
548
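/* Second page of the event mask: Connectionless Slave Broadcast and
 * Authenticated Payload Timeout Expired events, when supported.
 */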
549static void hci_set_event_mask_page_2(struct hci_request *req)
550{
551 struct hci_dev *hdev = req->hdev;
552 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
553
554 /* If Connectionless Slave Broadcast master role is supported
555 * enable all necessary events for it.
556 */
557 if (lmp_csb_master_capable(hdev)) {
558 events[1] |= 0x40; /* Triggered Clock Capture */
559 events[1] |= 0x80; /* Synchronization Train Complete */
560 events[2] |= 0x10; /* Slave Page Response Timeout */
561 events[2] |= 0x20; /* CSB Channel Map Change */
562 }
563
564 /* If Connectionless Slave Broadcast slave role is supported
565 * enable all necessary events for it.
566 */
567 if (lmp_csb_slave_capable(hdev)) {
568 events[2] |= 0x01; /* Synchronization Train Received */
569 events[2] |= 0x02; /* CSB Receive */
570 events[2] |= 0x04; /* CSB Timeout */
571 events[2] |= 0x08; /* Truncated Page Complete */
572 }
573
574 /* Enable Authenticated Payload Timeout Expired event if supported */
575 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
576 events[2] |= 0x80;
577
578 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
579}
580
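/* Third-stage init: the main event mask, stored link keys, page scan
 * parameters and, for LE capable controllers, the LE event mask, white
 * list handling and data length settings.
 */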
581static int hci_init3_req(struct hci_request *req, unsigned long opt)
582{
583 struct hci_dev *hdev = req->hdev;
584 u8 p;
585
586 hci_setup_event_mask(req);
587
588 if (hdev->commands[6] & 0x20 &&
589 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
590 struct hci_cp_read_stored_link_key cp;
591
592 bacpy(&cp.bdaddr, BDADDR_ANY);
593 cp.read_all = 0x01;
594 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
595 }
596
597 if (hdev->commands[5] & 0x10)
598 hci_setup_link_policy(req);
599
600 if (hdev->commands[8] & 0x01)
601 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
602
603 /* Some older Broadcom based Bluetooth 1.2 controllers do not
604 * support the Read Page Scan Type command. Check support for
605 * this command in the bit mask of supported commands.
606 */
607 if (hdev->commands[13] & 0x01)
608 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
609
610 if (lmp_le_capable(hdev)) {
611 u8 events[8];
612
613 memset(events, 0, sizeof(events));
614
615 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
616 events[0] |= 0x10; /* LE Long Term Key Request */
617
618 /* If the controller supports the Connection Parameters Request
619 * Link Layer Procedure, enable the corresponding event.
620 */
621 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
622 events[0] |= 0x20; /* LE Remote Connection
623 * Parameter Request
624 */
625
626 /* If the controller supports the Data Length Extension
627 * feature, enable the corresponding event.
628 */
629 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
630 events[0] |= 0x40; /* LE Data Length Change */
631
632 /* If the controller supports Extended Scanner Filter
633 * Policies, enable the corresponding event.
634 */
635 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
636 events[1] |= 0x04; /* LE Direct Advertising
637 * Report
638 */
639
640 /* If the controller supports the LE Set Scan Enable command,
641 * enable the corresponding advertising report event.
642 */
643 if (hdev->commands[26] & 0x08)
644 events[0] |= 0x02; /* LE Advertising Report */
645
646 /* If the controller supports the LE Create Connection
647 * command, enable the corresponding event.
648 */
649 if (hdev->commands[26] & 0x10)
650 events[0] |= 0x01; /* LE Connection Complete */
651
652 /* If the controller supports the LE Connection Update
653 * command, enable the corresponding event.
654 */
655 if (hdev->commands[27] & 0x04)
656 events[0] |= 0x04; /* LE Connection Update
657 * Complete
658 */
659
660 /* If the controller supports the LE Read Remote Used Features
661 * command, enable the corresponding event.
662 */
663 if (hdev->commands[27] & 0x20)
664 events[0] |= 0x08; /* LE Read Remote Used
665 * Features Complete
666 */
667
668 /* If the controller supports the LE Read Local P-256
669 * Public Key command, enable the corresponding event.
670 */
671 if (hdev->commands[34] & 0x02)
672 events[0] |= 0x80; /* LE Read Local P-256
673 * Public Key Complete
674 */
675
676 /* If the controller supports the LE Generate DHKey
677 * command, enable the corresponding event.
678 */
679 if (hdev->commands[34] & 0x04)
680 events[1] |= 0x01; /* LE Generate DHKey Complete */
681
682 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
683 events);
684
685 if (hdev->commands[25] & 0x40) {
686 /* Read LE Advertising Channel TX Power */
687 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
688 }
689
690 if (hdev->commands[26] & 0x40) {
691 /* Read LE White List Size */
692 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
693 0, NULL);
694 }
695
696 if (hdev->commands[26] & 0x80) {
697 /* Clear LE White List */
698 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
699 }
700
701 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
702 /* Read LE Maximum Data Length */
703 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
704
705 /* Read LE Suggested Default Data Length */
706 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
707 }
708
709 hci_set_le_support(req);
710 }
711
712 /* Read features beyond page 1 if available */
713 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
714 struct hci_cp_read_local_ext_features cp;
715
716 cp.page = p;
717 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
718 sizeof(cp), &cp);
719 }
720
721 return 0;
722}
723
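/* Fourth-stage init: optional commands such as Delete Stored Link Key,
 * event mask page 2, the local codec list, MWS transport configuration,
 * synchronization train parameters and Secure Connections support.
 */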
724static int hci_init4_req(struct hci_request *req, unsigned long opt)
725{
726 struct hci_dev *hdev = req->hdev;
727
728 /* Some Broadcom based Bluetooth controllers do not support the
729 * Delete Stored Link Key command. They are clearly indicating its
730 * absence in the bit mask of supported commands.
731 *
732 * Check the supported commands and only if the command is marked
733 * as supported send it. If not supported assume that the controller
734 * does not have actual support for stored link keys which makes this
735 * command redundant anyway.
736 *
737 * Some controllers indicate that they support handling deleting
738 * stored link keys, but they don't. The quirk lets a driver
739 * just disable this command.
740 */
741 if (hdev->commands[6] & 0x80 &&
742 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
743 struct hci_cp_delete_stored_link_key cp;
744
745 bacpy(&cp.bdaddr, BDADDR_ANY);
746 cp.delete_all = 0x01;
747 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
748 sizeof(cp), &cp);
749 }
750
751 /* Set event mask page 2 if the HCI command for it is supported */
752 if (hdev->commands[22] & 0x04)
753 hci_set_event_mask_page_2(req);
754
755 /* Read local codec list if the HCI command is supported */
756 if (hdev->commands[29] & 0x20)
757 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
758
759 /* Get MWS transport configuration if the HCI command is supported */
760 if (hdev->commands[30] & 0x08)
761 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
762
763 /* Check for Synchronization Train support */
764 if (lmp_sync_train_capable(hdev))
765 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
766
767 /* Enable Secure Connections if supported and configured */
768 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
769 bredr_sc_enabled(hdev)) {
770 u8 support = 0x01;
771
772 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
773 sizeof(support), &support);
774 }
775
776 return 0;
777}
778
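/* Run the staged init sequence for a configured controller. AMP
 * controllers stop after the second stage, and the debugfs entries are
 * only created during the setup or config phase.
 */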
779static int __hci_init(struct hci_dev *hdev)
780{
781 int err;
782
783 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
784 if (err < 0)
785 return err;
786
787 if (hci_dev_test_flag(hdev, HCI_SETUP))
788 hci_debugfs_create_basic(hdev);
789
790 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
791 if (err < 0)
792 return err;
793
794 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
795 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
796 * first two stages of init.
797 */
798 if (hdev->dev_type != HCI_BREDR)
799 return 0;
800
801 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
802 if (err < 0)
803 return err;
804
805 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
806 if (err < 0)
807 return err;
808
809 /* This function is only called when the controller is actually in
810 * configured state. When the controller is marked as unconfigured,
811 * this initialization procedure is not run.
812 *
813 * It means that it is possible that a controller runs through its
814 * setup phase and then discovers missing settings. If that is the
815 * case, then this function will not be called. It then will only
816 * be called during the config phase.
817 *
818 * So only when in setup phase or config phase, create the debugfs
819 * entries and register the SMP channels.
820 */
821 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
822 !hci_dev_test_flag(hdev, HCI_CONFIG))
823 return 0;
824
825 hci_debugfs_create_common(hdev);
826
827 if (lmp_bredr_capable(hdev))
828 hci_debugfs_create_bredr(hdev);
829
830 if (lmp_le_capable(hdev))
831 hci_debugfs_create_le(hdev);
832
833 return 0;
834}
835
836static int hci_init0_req(struct hci_request *req, unsigned long opt)
837{
838 struct hci_dev *hdev = req->hdev;
839
840 BT_DBG("%s %ld", hdev->name, opt);
841
842 /* Reset */
843 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
844 hci_reset_req(req, 0);
845
846 /* Read Local Version */
847 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
848
849 /* Read BD Address */
850 if (hdev->set_bdaddr)
851 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
852
853 return 0;
854}
855
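/* Minimal init for an unconfigured controller: read just the local
 * version and, if the driver can change it, the BD address.
 */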
856static int __hci_unconf_init(struct hci_dev *hdev)
857{
858 int err;
859
860 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
861 return 0;
862
863 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
864 if (err < 0)
865 return err;
866
867 if (hci_dev_test_flag(hdev, HCI_SETUP))
868 hci_debugfs_create_basic(hdev);
869
870 return 0;
871}
872
873static int hci_scan_req(struct hci_request *req, unsigned long opt)
874{
875 __u8 scan = opt;
876
877 BT_DBG("%s %x", req->hdev->name, scan);
878
879 /* Inquiry and Page scans */
880 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
881 return 0;
882}
883
884static int hci_auth_req(struct hci_request *req, unsigned long opt)
885{
886 __u8 auth = opt;
887
888 BT_DBG("%s %x", req->hdev->name, auth);
889
890 /* Authentication */
891 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
892 return 0;
893}
894
895static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
896{
897 __u8 encrypt = opt;
898
899 BT_DBG("%s %x", req->hdev->name, encrypt);
900
901 /* Encryption */
902 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
903 return 0;
904}
905
906static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
907{
908 __le16 policy = cpu_to_le16(opt);
909
910 BT_DBG("%s %x", req->hdev->name, policy);
911
912 /* Default link policy */
913 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
914 return 0;
915}
916
917/* Get HCI device by index.
918 * Device is held on return. */
919struct hci_dev *hci_dev_get(int index)
920{
921 struct hci_dev *hdev = NULL, *d;
922
923 BT_DBG("%d", index);
924
925 if (index < 0)
926 return NULL;
927
928 read_lock(&hci_dev_list_lock);
929 list_for_each_entry(d, &hci_dev_list, list) {
930 if (d->id == index) {
931 hdev = hci_dev_hold(d);
932 break;
933 }
934 }
935 read_unlock(&hci_dev_list_lock);
936 return hdev;
937}
938
939/* ---- Inquiry support ---- */
940
941bool hci_discovery_active(struct hci_dev *hdev)
942{
943 struct discovery_state *discov = &hdev->discovery;
944
945 switch (discov->state) {
946 case DISCOVERY_FINDING:
947 case DISCOVERY_RESOLVING:
948 return true;
949
950 default:
951 return false;
952 }
953}
954
955void hci_discovery_set_state(struct hci_dev *hdev, int state)
956{
957 int old_state = hdev->discovery.state;
958
959 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
960
961 if (old_state == state)
962 return;
963
964 hdev->discovery.state = state;
965
966 switch (state) {
967 case DISCOVERY_STOPPED:
968 hci_update_background_scan(hdev);
969
970 if (old_state != DISCOVERY_STARTING)
971 mgmt_discovering(hdev, 0);
972 break;
973 case DISCOVERY_STARTING:
974 break;
975 case DISCOVERY_FINDING:
976 mgmt_discovering(hdev, 1);
977 break;
978 case DISCOVERY_RESOLVING:
979 break;
980 case DISCOVERY_STOPPING:
981 break;
982 }
983}
984
985void hci_inquiry_cache_flush(struct hci_dev *hdev)
986{
987 struct discovery_state *cache = &hdev->discovery;
988 struct inquiry_entry *p, *n;
989
990 list_for_each_entry_safe(p, n, &cache->all, all) {
991 list_del(&p->all);
992 kfree(p);
993 }
994
995 INIT_LIST_HEAD(&cache->unknown);
996 INIT_LIST_HEAD(&cache->resolve);
997}
998
999struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1000 bdaddr_t *bdaddr)
1001{
1002 struct discovery_state *cache = &hdev->discovery;
1003 struct inquiry_entry *e;
1004
1005 BT_DBG("cache %p, %pMR", cache, bdaddr);
1006
1007 list_for_each_entry(e, &cache->all, all) {
1008 if (!bacmp(&e->data.bdaddr, bdaddr))
1009 return e;
1010 }
1011
1012 return NULL;
1013}
1014
1015struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1016 bdaddr_t *bdaddr)
1017{
1018 struct discovery_state *cache = &hdev->discovery;
1019 struct inquiry_entry *e;
1020
1021 BT_DBG("cache %p, %pMR", cache, bdaddr);
1022
1023 list_for_each_entry(e, &cache->unknown, list) {
1024 if (!bacmp(&e->data.bdaddr, bdaddr))
1025 return e;
1026 }
1027
1028 return NULL;
1029}
1030
1031struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1032 bdaddr_t *bdaddr,
1033 int state)
1034{
1035 struct discovery_state *cache = &hdev->discovery;
1036 struct inquiry_entry *e;
1037
1038 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1039
1040 list_for_each_entry(e, &cache->resolve, list) {
1041 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1042 return e;
1043 if (!bacmp(&e->data.bdaddr, bdaddr))
1044 return e;
1045 }
1046
1047 return NULL;
1048}
1049
1050void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1051 struct inquiry_entry *ie)
1052{
1053 struct discovery_state *cache = &hdev->discovery;
1054 struct list_head *pos = &cache->resolve;
1055 struct inquiry_entry *p;
1056
1057 list_del(&ie->list);
1058
1059 list_for_each_entry(p, &cache->resolve, list) {
1060 if (p->name_state != NAME_PENDING &&
1061 abs(p->data.rssi) >= abs(ie->data.rssi))
1062 break;
1063 pos = &p->list;
1064 }
1065
1066 list_add(&ie->list, pos);
1067}
1068
1069u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1070 bool name_known)
1071{
1072 struct discovery_state *cache = &hdev->discovery;
1073 struct inquiry_entry *ie;
1074 u32 flags = 0;
1075
1076 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1077
1078 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1079
1080 if (!data->ssp_mode)
1081 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1082
1083 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1084 if (ie) {
1085 if (!ie->data.ssp_mode)
1086 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1087
1088 if (ie->name_state == NAME_NEEDED &&
1089 data->rssi != ie->data.rssi) {
1090 ie->data.rssi = data->rssi;
1091 hci_inquiry_cache_update_resolve(hdev, ie);
1092 }
1093
1094 goto update;
1095 }
1096
1097 /* Entry not in the cache. Add new one. */
1098 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1099 if (!ie) {
1100 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1101 goto done;
1102 }
1103
1104 list_add(&ie->all, &cache->all);
1105
1106 if (name_known) {
1107 ie->name_state = NAME_KNOWN;
1108 } else {
1109 ie->name_state = NAME_NOT_KNOWN;
1110 list_add(&ie->list, &cache->unknown);
1111 }
1112
1113update:
1114 if (name_known && ie->name_state != NAME_KNOWN &&
1115 ie->name_state != NAME_PENDING) {
1116 ie->name_state = NAME_KNOWN;
1117 list_del(&ie->list);
1118 }
1119
1120 memcpy(&ie->data, data, sizeof(*data));
1121 ie->timestamp = jiffies;
1122 cache->timestamp = jiffies;
1123
1124 if (ie->name_state == NAME_NOT_KNOWN)
1125 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1126
1127done:
1128 return flags;
1129}
1130
1131static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1132{
1133 struct discovery_state *cache = &hdev->discovery;
1134 struct inquiry_info *info = (struct inquiry_info *) buf;
1135 struct inquiry_entry *e;
1136 int copied = 0;
1137
1138 list_for_each_entry(e, &cache->all, all) {
1139 struct inquiry_data *data = &e->data;
1140
1141 if (copied >= num)
1142 break;
1143
1144 bacpy(&info->bdaddr, &data->bdaddr);
1145 info->pscan_rep_mode = data->pscan_rep_mode;
1146 info->pscan_period_mode = data->pscan_period_mode;
1147 info->pscan_mode = data->pscan_mode;
1148 memcpy(info->dev_class, data->dev_class, 3);
1149 info->clock_offset = data->clock_offset;
1150
1151 info++;
1152 copied++;
1153 }
1154
1155 BT_DBG("cache %p, copied %d", cache, copied);
1156 return copied;
1157}
1158
1159static int hci_inq_req(struct hci_request *req, unsigned long opt)
1160{
1161 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1162 struct hci_dev *hdev = req->hdev;
1163 struct hci_cp_inquiry cp;
1164
1165 BT_DBG("%s", hdev->name);
1166
1167 if (test_bit(HCI_INQUIRY, &hdev->flags))
1168 return 0;
1169
1170 /* Start Inquiry */
1171 memcpy(&cp.lap, &ir->lap, 3);
1172 cp.length = ir->length;
1173 cp.num_rsp = ir->num_rsp;
1174 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1175
1176 return 0;
1177}
1178
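/* HCIINQUIRY ioctl helper: flush and refresh the inquiry cache when it is
 * stale or a flush was requested, then copy the cached results to user
 * space.
 */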
1179int hci_inquiry(void __user *arg)
1180{
1181 __u8 __user *ptr = arg;
1182 struct hci_inquiry_req ir;
1183 struct hci_dev *hdev;
1184 int err = 0, do_inquiry = 0, max_rsp;
1185 long timeo;
1186 __u8 *buf;
1187
1188 if (copy_from_user(&ir, ptr, sizeof(ir)))
1189 return -EFAULT;
1190
1191 hdev = hci_dev_get(ir.dev_id);
1192 if (!hdev)
1193 return -ENODEV;
1194
1195 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1196 err = -EBUSY;
1197 goto done;
1198 }
1199
1200 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1201 err = -EOPNOTSUPP;
1202 goto done;
1203 }
1204
1205 if (hdev->dev_type != HCI_BREDR) {
1206 err = -EOPNOTSUPP;
1207 goto done;
1208 }
1209
1210 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1211 err = -EOPNOTSUPP;
1212 goto done;
1213 }
1214
1215 hci_dev_lock(hdev);
1216 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1217 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1218 hci_inquiry_cache_flush(hdev);
1219 do_inquiry = 1;
1220 }
1221 hci_dev_unlock(hdev);
1222
1223 timeo = ir.length * msecs_to_jiffies(2000);
1224
1225 if (do_inquiry) {
1226 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1227 timeo, NULL);
1228 if (err < 0)
1229 goto done;
1230
1231 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1232 * cleared). If it is interrupted by a signal, return -EINTR.
1233 */
1234 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1235 TASK_INTERRUPTIBLE))
1236 return -EINTR;
1237 }
1238
1239 /* for unlimited number of responses we will use buffer with
1240 * 255 entries
1241 */
1242 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1243
1244 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1245 * copy it to the user space.
1246 */
1247 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1248 if (!buf) {
1249 err = -ENOMEM;
1250 goto done;
1251 }
1252
1253 hci_dev_lock(hdev);
1254 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1255 hci_dev_unlock(hdev);
1256
1257 BT_DBG("num_rsp %d", ir.num_rsp);
1258
1259 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1260 ptr += sizeof(ir);
1261 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1262 ir.num_rsp))
1263 err = -EFAULT;
1264 } else
1265 err = -EFAULT;
1266
1267 kfree(buf);
1268
1269done:
1270 hci_dev_put(hdev);
1271 return err;
1272}
1273
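/* Power on the controller: open the transport, run the driver setup
 * callback, perform the staged (or unconfigured) HCI init and report the
 * resulting power state to the management interface.
 */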
1274static int hci_dev_do_open(struct hci_dev *hdev)
1275{
1276 int ret = 0;
1277
1278 BT_DBG("%s %p", hdev->name, hdev);
1279
1280 hci_req_sync_lock(hdev);
1281
1282 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1283 ret = -ENODEV;
1284 goto done;
1285 }
1286
1287 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1288 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1289 /* Check for rfkill but allow the HCI setup stage to
1290 * proceed (which in itself doesn't cause any RF activity).
1291 */
1292 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1293 ret = -ERFKILL;
1294 goto done;
1295 }
1296
1297 /* Check for valid public address or a configured static
1298 * random address, but let the HCI setup proceed to
1299 * be able to determine if there is a public address
1300 * or not.
1301 *
1302 * In case of user channel usage, it is not important
1303 * if a public address or static random address is
1304 * available.
1305 *
1306 * This check is only valid for BR/EDR controllers
1307 * since AMP controllers do not have an address.
1308 */
1309 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1310 hdev->dev_type == HCI_BREDR &&
1311 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1312 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1313 ret = -EADDRNOTAVAIL;
1314 goto done;
1315 }
1316 }
1317
1318 if (test_bit(HCI_UP, &hdev->flags)) {
1319 ret = -EALREADY;
1320 goto done;
1321 }
1322
1323 if (hdev->open(hdev)) {
1324 ret = -EIO;
1325 goto done;
1326 }
1327
1328 set_bit(HCI_RUNNING, &hdev->flags);
1329 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1330
1331 atomic_set(&hdev->cmd_cnt, 1);
1332 set_bit(HCI_INIT, &hdev->flags);
1333
1334 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1335 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1336
1337 if (hdev->setup)
1338 ret = hdev->setup(hdev);
1339
1340 /* The transport driver can set these quirks before
1341 * creating the HCI device or in its setup callback.
1342 *
1343 * In case any of them is set, the controller has to
1344 * start up as unconfigured.
1345 */
1346 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1347 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1348 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1349
1350 /* For an unconfigured controller it is required to
1351 * read at least the version information provided by
1352 * the Read Local Version Information command.
1353 *
1354 * If the set_bdaddr driver callback is provided, then
1355 * also the original Bluetooth public device address
1356 * will be read using the Read BD Address command.
1357 */
1358 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1359 ret = __hci_unconf_init(hdev);
1360 }
1361
1362 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1363 /* If public address change is configured, ensure that
1364 * the address gets programmed. If the driver does not
1365 * support changing the public address, fail the power
1366 * on procedure.
1367 */
1368 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1369 hdev->set_bdaddr)
1370 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1371 else
1372 ret = -EADDRNOTAVAIL;
1373 }
1374
1375 if (!ret) {
1376 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1377 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1378 ret = __hci_init(hdev);
1379 if (!ret && hdev->post_init)
1380 ret = hdev->post_init(hdev);
1381 }
1382 }
1383
1384 /* If the HCI Reset command is clearing all diagnostic settings,
1385 * then they need to be reprogrammed after the init procedure
1386 * has completed.
1387 */
1388 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1389 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1390 ret = hdev->set_diag(hdev, true);
1391
1392 clear_bit(HCI_INIT, &hdev->flags);
1393
1394 if (!ret) {
1395 hci_dev_hold(hdev);
1396 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1397 set_bit(HCI_UP, &hdev->flags);
1398 hci_sock_dev_event(hdev, HCI_DEV_UP);
1399 hci_leds_update_powered(hdev, true);
1400 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1401 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1402 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1403 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1404 hci_dev_test_flag(hdev, HCI_MGMT) &&
1405 hdev->dev_type == HCI_BREDR) {
1406 ret = __hci_req_hci_power_on(hdev);
1407 mgmt_power_on(hdev, ret);
1408 }
1409 } else {
1410 /* Init failed, cleanup */
1411 flush_work(&hdev->tx_work);
1412 flush_work(&hdev->cmd_work);
1413 flush_work(&hdev->rx_work);
1414
1415 skb_queue_purge(&hdev->cmd_q);
1416 skb_queue_purge(&hdev->rx_q);
1417
1418 if (hdev->flush)
1419 hdev->flush(hdev);
1420
1421 if (hdev->sent_cmd) {
1422 kfree_skb(hdev->sent_cmd);
1423 hdev->sent_cmd = NULL;
1424 }
1425
1426 clear_bit(HCI_RUNNING, &hdev->flags);
1427 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1428
1429 hdev->close(hdev);
1430 hdev->flags &= BIT(HCI_RAW);
1431 }
1432
1433done:
1434 hci_req_sync_unlock(hdev);
1435 return ret;
1436}
1437
1438/* ---- HCI ioctl helpers ---- */
1439
1440int hci_dev_open(__u16 dev)
1441{
1442 struct hci_dev *hdev;
1443 int err;
1444
1445 hdev = hci_dev_get(dev);
1446 if (!hdev)
1447 return -ENODEV;
1448
1449 /* Devices that are marked as unconfigured can only be powered
1450 * up as user channel. Trying to bring them up as normal devices
1451 * will result in a failure. Only user channel operation is
1452 * possible.
1453 *
1454 * When this function is called for a user channel, the flag
1455 * HCI_USER_CHANNEL will be set first before attempting to
1456 * open the device.
1457 */
1458 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1459 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1460 err = -EOPNOTSUPP;
1461 goto done;
1462 }
1463
1464 /* We need to ensure that no other power on/off work is pending
1465 * before proceeding to call hci_dev_do_open. This is
1466 * particularly important if the setup procedure has not yet
1467 * completed.
1468 */
1469 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1470 cancel_delayed_work(&hdev->power_off);
1471
1472 /* After this call it is guaranteed that the setup procedure
1473 * has finished. This means that error conditions like RFKILL
1474 * or no valid public or static random address apply.
1475 */
1476 flush_workqueue(hdev->req_workqueue);
1477
1478 /* For controllers not using the management interface and that
1479 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1480 * so that pairing works for them. Once the management interface
1481 * is in use this bit will be cleared again and userspace has
1482 * to explicitly enable it.
1483 */
1484 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1485 !hci_dev_test_flag(hdev, HCI_MGMT))
1486 hci_dev_set_flag(hdev, HCI_BONDABLE);
1487
1488 err = hci_dev_do_open(hdev);
1489
1490done:
1491 hci_dev_put(hdev);
1492 return err;
1493}
1494
1495/* This function requires the caller holds hdev->lock */
1496static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1497{
1498 struct hci_conn_params *p;
1499
1500 list_for_each_entry(p, &hdev->le_conn_params, list) {
1501 if (p->conn) {
1502 hci_conn_drop(p->conn);
1503 hci_conn_put(p->conn);
1504 p->conn = NULL;
1505 }
1506 list_del_init(&p->action);
1507 }
1508
1509 BT_DBG("All LE pending actions cleared");
1510}
1511
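/* Power off the controller: cancel pending work, flush queues, caches and
 * connections, optionally send a final HCI Reset and close the transport.
 */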
1512int hci_dev_do_close(struct hci_dev *hdev)
1513{
1514 bool auto_off;
1515
1516 BT_DBG("%s %p", hdev->name, hdev);
1517
1518 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1519 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1520 test_bit(HCI_UP, &hdev->flags)) {
1521 /* Execute vendor specific shutdown routine */
1522 if (hdev->shutdown)
1523 hdev->shutdown(hdev);
1524 }
1525
1526 cancel_delayed_work(&hdev->power_off);
1527
1528 hci_request_cancel_all(hdev);
1529 hci_req_sync_lock(hdev);
1530
1531 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1532 cancel_delayed_work_sync(&hdev->cmd_timer);
1533 hci_req_sync_unlock(hdev);
1534 return 0;
1535 }
1536
1537 hci_leds_update_powered(hdev, false);
1538
1539 /* Flush RX and TX works */
1540 flush_work(&hdev->tx_work);
1541 flush_work(&hdev->rx_work);
1542
1543 if (hdev->discov_timeout > 0) {
1544 hdev->discov_timeout = 0;
1545 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1546 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1547 }
1548
1549 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1550 cancel_delayed_work(&hdev->service_cache);
1551
1552 if (hci_dev_test_flag(hdev, HCI_MGMT))
1553 cancel_delayed_work_sync(&hdev->rpa_expired);
1554
1555 /* Avoid potential lockdep warnings from the *_flush() calls by
1556 * ensuring the workqueue is empty up front.
1557 */
1558 drain_workqueue(hdev->workqueue);
1559
1560 hci_dev_lock(hdev);
1561
1562 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1563
1564 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1565
1566 if (!auto_off && hdev->dev_type == HCI_BREDR &&
1567 hci_dev_test_flag(hdev, HCI_MGMT))
1568 __mgmt_power_off(hdev);
1569
1570 hci_inquiry_cache_flush(hdev);
1571 hci_pend_le_actions_clear(hdev);
1572 hci_conn_hash_flush(hdev);
1573 hci_dev_unlock(hdev);
1574
1575 smp_unregister(hdev);
1576
1577 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1578
1579 if (hdev->flush)
1580 hdev->flush(hdev);
1581
1582 /* Reset device */
1583 skb_queue_purge(&hdev->cmd_q);
1584 atomic_set(&hdev->cmd_cnt, 1);
1585 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1586 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1587 set_bit(HCI_INIT, &hdev->flags);
1588 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1589 clear_bit(HCI_INIT, &hdev->flags);
1590 }
1591
1592 /* flush cmd work */
1593 flush_work(&hdev->cmd_work);
1594
1595 /* Drop queues */
1596 skb_queue_purge(&hdev->rx_q);
1597 skb_queue_purge(&hdev->cmd_q);
1598 skb_queue_purge(&hdev->raw_q);
1599
1600 /* Drop last sent command */
1601 if (hdev->sent_cmd) {
1602 cancel_delayed_work_sync(&hdev->cmd_timer);
1603 kfree_skb(hdev->sent_cmd);
1604 hdev->sent_cmd = NULL;
1605 }
1606
1607 clear_bit(HCI_RUNNING, &hdev->flags);
1608 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1609
1610 /* After this point our queues are empty
1611 * and no tasks are scheduled. */
1612 hdev->close(hdev);
1613
1614 /* Clear flags */
1615 hdev->flags &= BIT(HCI_RAW);
1616 hci_dev_clear_volatile_flags(hdev);
1617
1618 /* Controller radio is available but is currently powered down */
1619 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1620
1621 memset(hdev->eir, 0, sizeof(hdev->eir));
1622 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1623 bacpy(&hdev->random_addr, BDADDR_ANY);
1624
1625 hci_req_sync_unlock(hdev);
1626
1627 hci_dev_put(hdev);
1628 return 0;
1629}
1630
1631int hci_dev_close(__u16 dev)
1632{
1633 struct hci_dev *hdev;
1634 int err;
1635
1636 hdev = hci_dev_get(dev);
1637 if (!hdev)
1638 return -ENODEV;
1639
1640 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1641 err = -EBUSY;
1642 goto done;
1643 }
1644
1645 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1646 cancel_delayed_work(&hdev->power_off);
1647
1648 err = hci_dev_do_close(hdev);
1649
1650done:
1651 hci_dev_put(hdev);
1652 return err;
1653}
1654
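/* Reset a running controller: drop queued traffic, flush the inquiry
 * cache and connection hash, then issue a synchronous HCI Reset.
 */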
1655static int hci_dev_do_reset(struct hci_dev *hdev)
1656{
1657 int ret;
1658
1659 BT_DBG("%s %p", hdev->name, hdev);
1660
1661 hci_req_sync_lock(hdev);
1662
1663 /* Drop queues */
1664 skb_queue_purge(&hdev->rx_q);
1665 skb_queue_purge(&hdev->cmd_q);
1666
1667 /* Avoid potential lockdep warnings from the *_flush() calls by
1668 * ensuring the workqueue is empty up front.
1669 */
1670 drain_workqueue(hdev->workqueue);
1671
1672 hci_dev_lock(hdev);
1673 hci_inquiry_cache_flush(hdev);
1674 hci_conn_hash_flush(hdev);
1675 hci_dev_unlock(hdev);
1676
1677 if (hdev->flush)
1678 hdev->flush(hdev);
1679
1680 atomic_set(&hdev->cmd_cnt, 1);
1681 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1682
1683 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1684
1685 hci_req_sync_unlock(hdev);
1686 return ret;
1687}
1688
1689int hci_dev_reset(__u16 dev)
1690{
1691 struct hci_dev *hdev;
1692 int err;
1693
1694 hdev = hci_dev_get(dev);
1695 if (!hdev)
1696 return -ENODEV;
1697
1698 if (!test_bit(HCI_UP, &hdev->flags)) {
1699 err = -ENETDOWN;
1700 goto done;
1701 }
1702
1703 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1704 err = -EBUSY;
1705 goto done;
1706 }
1707
1708 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1709 err = -EOPNOTSUPP;
1710 goto done;
1711 }
1712
1713 err = hci_dev_do_reset(hdev);
1714
1715done:
1716 hci_dev_put(hdev);
1717 return err;
1718}
1719
1720int hci_dev_reset_stat(__u16 dev)
1721{
1722 struct hci_dev *hdev;
1723 int ret = 0;
1724
1725 hdev = hci_dev_get(dev);
1726 if (!hdev)
1727 return -ENODEV;
1728
1729 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1730 ret = -EBUSY;
1731 goto done;
1732 }
1733
1734 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1735 ret = -EOPNOTSUPP;
1736 goto done;
1737 }
1738
1739 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1740
1741done:
1742 hci_dev_put(hdev);
1743 return ret;
1744}
1745
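/* Keep HCI_CONNECTABLE and HCI_DISCOVERABLE in sync with a scan mode that
 * was changed through the legacy HCISETSCAN ioctl and let the management
 * interface know about the new settings.
 */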
1746static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1747{
1748 bool conn_changed, discov_changed;
1749
1750 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1751
1752 if ((scan & SCAN_PAGE))
1753 conn_changed = !hci_dev_test_and_set_flag(hdev,
1754 HCI_CONNECTABLE);
1755 else
1756 conn_changed = hci_dev_test_and_clear_flag(hdev,
1757 HCI_CONNECTABLE);
1758
1759 if ((scan & SCAN_INQUIRY)) {
1760 discov_changed = !hci_dev_test_and_set_flag(hdev,
1761 HCI_DISCOVERABLE);
1762 } else {
1763 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1764 discov_changed = hci_dev_test_and_clear_flag(hdev,
1765 HCI_DISCOVERABLE);
1766 }
1767
1768 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1769 return;
1770
1771 if (conn_changed || discov_changed) {
1772 /* In case this was disabled through mgmt */
1773 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1774
1775 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1776 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1777
1778 mgmt_new_settings(hdev);
1779 }
1780}
1781
1782int hci_dev_cmd(unsigned int cmd, void __user *arg)
1783{
1784 struct hci_dev *hdev;
1785 struct hci_dev_req dr;
1786 int err = 0;
1787
1788 if (copy_from_user(&dr, arg, sizeof(dr)))
1789 return -EFAULT;
1790
1791 hdev = hci_dev_get(dr.dev_id);
1792 if (!hdev)
1793 return -ENODEV;
1794
1795 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1796 err = -EBUSY;
1797 goto done;
1798 }
1799
1800 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1801 err = -EOPNOTSUPP;
1802 goto done;
1803 }
1804
1805 if (hdev->dev_type != HCI_BREDR) {
1806 err = -EOPNOTSUPP;
1807 goto done;
1808 }
1809
1810 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1811 err = -EOPNOTSUPP;
1812 goto done;
1813 }
1814
1815 switch (cmd) {
1816 case HCISETAUTH:
1817 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1818 HCI_INIT_TIMEOUT, NULL);
1819 break;
1820
1821 case HCISETENCRYPT:
1822 if (!lmp_encrypt_capable(hdev)) {
1823 err = -EOPNOTSUPP;
1824 break;
1825 }
1826
1827 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1828 /* Auth must be enabled first */
1829 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1830 HCI_INIT_TIMEOUT, NULL);
1831 if (err)
1832 break;
1833 }
1834
1835 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1836 HCI_INIT_TIMEOUT, NULL);
1837 break;
1838
1839 case HCISETSCAN:
1840 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1841 HCI_INIT_TIMEOUT, NULL);
1842
1843 /* Ensure that the connectable and discoverable states
1844 * get correctly modified as this was a non-mgmt change.
1845 */
1846 if (!err)
1847 hci_update_scan_state(hdev, dr.dev_opt);
1848 break;
1849
1850 case HCISETLINKPOL:
1851 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1852 HCI_INIT_TIMEOUT, NULL);
1853 break;
1854
1855 case HCISETLINKMODE:
1856 hdev->link_mode = ((__u16) dr.dev_opt) &
1857 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1858 break;
1859
1860 case HCISETPTYPE:
1861 hdev->pkt_type = (__u16) dr.dev_opt;
1862 break;
1863
1864 case HCISETACLMTU:
1865 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1866 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1867 break;
1868
1869 case HCISETSCOMTU:
1870 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1871 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1872 break;
1873
1874 default:
1875 err = -EINVAL;
1876 break;
1877 }
1878
1879done:
1880 hci_dev_put(hdev);
1881 return err;
1882}
1883
1884int hci_get_dev_list(void __user *arg)
1885{
1886 struct hci_dev *hdev;
1887 struct hci_dev_list_req *dl;
1888 struct hci_dev_req *dr;
1889 int n = 0, size, err;
1890 __u16 dev_num;
1891
1892 if (get_user(dev_num, (__u16 __user *) arg))
1893 return -EFAULT;
1894
1895 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1896 return -EINVAL;
1897
1898 size = sizeof(*dl) + dev_num * sizeof(*dr);
1899
1900 dl = kzalloc(size, GFP_KERNEL);
1901 if (!dl)
1902 return -ENOMEM;
1903
1904 dr = dl->dev_req;
1905
1906 read_lock(&hci_dev_list_lock);
1907 list_for_each_entry(hdev, &hci_dev_list, list) {
1908 unsigned long flags = hdev->flags;
1909
1910 /* When the auto-off is configured it means the transport
1911 * is running, but in that case still indicate that the
1912 * device is actually down.
1913 */
1914 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1915 flags &= ~BIT(HCI_UP);
1916
1917 (dr + n)->dev_id = hdev->id;
1918 (dr + n)->dev_opt = flags;
1919
1920 if (++n >= dev_num)
1921 break;
1922 }
1923 read_unlock(&hci_dev_list_lock);
1924
1925 dl->dev_num = n;
1926 size = sizeof(*dl) + n * sizeof(*dr);
1927
1928 err = copy_to_user(arg, dl, size);
1929 kfree(dl);
1930
1931 return err ? -EFAULT : 0;
1932}
1933
1934int hci_get_dev_info(void __user *arg)
1935{
1936 struct hci_dev *hdev;
1937 struct hci_dev_info di;
1938 unsigned long flags;
1939 int err = 0;
1940
1941 if (copy_from_user(&di, arg, sizeof(di)))
1942 return -EFAULT;
1943
1944 hdev = hci_dev_get(di.dev_id);
1945 if (!hdev)
1946 return -ENODEV;
1947
1948 /* When the auto-off is configured it means the transport
1949 * is running, but in that case still indicate that the
1950 * device is actually down.
1951 */
1952 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1953 flags = hdev->flags & ~BIT(HCI_UP);
1954 else
1955 flags = hdev->flags;
1956
1957 strcpy(di.name, hdev->name);
1958 di.bdaddr = hdev->bdaddr;
1959 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1960 di.flags = flags;
1961 di.pkt_type = hdev->pkt_type;
1962 if (lmp_bredr_capable(hdev)) {
1963 di.acl_mtu = hdev->acl_mtu;
1964 di.acl_pkts = hdev->acl_pkts;
1965 di.sco_mtu = hdev->sco_mtu;
1966 di.sco_pkts = hdev->sco_pkts;
1967 } else {
1968 di.acl_mtu = hdev->le_mtu;
1969 di.acl_pkts = hdev->le_pkts;
1970 di.sco_mtu = 0;
1971 di.sco_pkts = 0;
1972 }
1973 di.link_policy = hdev->link_policy;
1974 di.link_mode = hdev->link_mode;
1975
1976 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1977 memcpy(&di.features, &hdev->features, sizeof(di.features));
1978
1979 if (copy_to_user(arg, &di, sizeof(di)))
1980 err = -EFAULT;
1981
1982 hci_dev_put(hdev);
1983
1984 return err;
1985}
1986
1987/* ---- Interface to HCI drivers ---- */
1988
1989static int hci_rfkill_set_block(void *data, bool blocked)
1990{
1991 struct hci_dev *hdev = data;
1992
1993 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1994
1995 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1996 return -EBUSY;
1997
1998 if (blocked) {
1999 hci_dev_set_flag(hdev, HCI_RFKILLED);
2000 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2001 !hci_dev_test_flag(hdev, HCI_CONFIG))
2002 hci_dev_do_close(hdev);
2003 } else {
2004 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2005 }
2006
2007 return 0;
2008}
2009
2010static const struct rfkill_ops hci_rfkill_ops = {
2011 .set_block = hci_rfkill_set_block,
2012};
2013
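/* Deferred power-on work: open the device and re-check the conditions
 * that are ignored during setup (rfkill, missing address, unconfigured
 * state), powering back off or scheduling auto-off where needed.
 */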
2014static void hci_power_on(struct work_struct *work)
2015{
2016 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2017 int err;
2018
2019 BT_DBG("%s", hdev->name);
2020
2021 if (test_bit(HCI_UP, &hdev->flags) &&
2022 hci_dev_test_flag(hdev, HCI_MGMT) &&
2023 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2024 cancel_delayed_work(&hdev->power_off);
2025 hci_req_sync_lock(hdev);
2026 err = __hci_req_hci_power_on(hdev);
2027 hci_req_sync_unlock(hdev);
2028 mgmt_power_on(hdev, err);
2029 return;
2030 }
2031
2032 err = hci_dev_do_open(hdev);
2033 if (err < 0) {
2034 hci_dev_lock(hdev);
2035 mgmt_set_powered_failed(hdev, err);
2036 hci_dev_unlock(hdev);
2037 return;
2038 }
2039
2040 /* During the HCI setup phase, a few error conditions are
2041 * ignored and they need to be checked now. If they are still
2042 * valid, it is important to turn the device back off.
2043 */
2044 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2045 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2046 (hdev->dev_type == HCI_BREDR &&
2047 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2048 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2049 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2050 hci_dev_do_close(hdev);
2051 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2052 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2053 HCI_AUTO_OFF_TIMEOUT);
2054 }
2055
2056 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2057 /* For unconfigured devices, set the HCI_RAW flag
2058 * so that userspace can easily identify them.
2059 */
2060 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2061 set_bit(HCI_RAW, &hdev->flags);
2062
2063 /* For fully configured devices, this will send
2064 * the Index Added event. For unconfigured devices,
2065		 * it will send an Unconfigured Index Added event.
2066 *
2067 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2068		 * and no event will be sent.
2069 */
2070 mgmt_index_added(hdev);
2071 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2072		/* Now that the controller is configured, it is
2073		 * important to clear the HCI_RAW flag.
2074 */
2075 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2076 clear_bit(HCI_RAW, &hdev->flags);
2077
2078 /* Powering on the controller with HCI_CONFIG set only
2079 * happens with the transition from unconfigured to
2080 * configured. This will send the Index Added event.
2081 */
2082 mgmt_index_added(hdev);
2083 }
2084}
2085
2086static void hci_power_off(struct work_struct *work)
2087{
2088 struct hci_dev *hdev = container_of(work, struct hci_dev,
2089 power_off.work);
2090
2091 BT_DBG("%s", hdev->name);
2092
2093 hci_dev_do_close(hdev);
2094}
2095
2096static void hci_error_reset(struct work_struct *work)
2097{
2098 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2099
2100 BT_DBG("%s", hdev->name);
2101
2102 if (hdev->hw_error)
2103 hdev->hw_error(hdev, hdev->hw_error_code);
2104 else
2105 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2106 hdev->hw_error_code);
2107
2108 if (hci_dev_do_close(hdev))
2109 return;
2110
2111 hci_dev_do_open(hdev);
2112}
2113
2114void hci_uuids_clear(struct hci_dev *hdev)
2115{
2116 struct bt_uuid *uuid, *tmp;
2117
2118 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2119 list_del(&uuid->list);
2120 kfree(uuid);
2121 }
2122}
2123
2124void hci_link_keys_clear(struct hci_dev *hdev)
2125{
2126 struct link_key *key;
2127
2128 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2129 list_del_rcu(&key->list);
2130 kfree_rcu(key, rcu);
2131 }
2132}
2133
2134void hci_smp_ltks_clear(struct hci_dev *hdev)
2135{
2136 struct smp_ltk *k;
2137
2138 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2139 list_del_rcu(&k->list);
2140 kfree_rcu(k, rcu);
2141 }
2142}
2143
2144void hci_smp_irks_clear(struct hci_dev *hdev)
2145{
2146 struct smp_irk *k;
2147
2148 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2149 list_del_rcu(&k->list);
2150 kfree_rcu(k, rcu);
2151 }
2152}
2153
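/* Look up a stored BR/EDR link key by remote address. The list is
 * walked under RCU; NULL is returned when no key is known.
 */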
2154struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2155{
2156 struct link_key *k;
2157
2158 rcu_read_lock();
2159 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2160 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2161 rcu_read_unlock();
2162 return k;
2163 }
2164 }
2165 rcu_read_unlock();
2166
2167 return NULL;
2168}
2169
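/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the authentication
 * requirements of both sides of the connection.
 */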
2170static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2171 u8 key_type, u8 old_key_type)
2172{
2173 /* Legacy key */
2174 if (key_type < 0x03)
2175 return true;
2176
2177 /* Debug keys are insecure so don't store them persistently */
2178 if (key_type == HCI_LK_DEBUG_COMBINATION)
2179 return false;
2180
2181 /* Changed combination key and there's no previous one */
2182 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2183 return false;
2184
2185 /* Security mode 3 case */
2186 if (!conn)
2187 return true;
2188
2189 /* BR/EDR key derived using SC from an LE link */
2190 if (conn->type == LE_LINK)
2191 return true;
2192
2193 /* Neither local nor remote side had no-bonding as requirement */
2194 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2195 return true;
2196
2197 /* Local side had dedicated bonding as requirement */
2198 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2199 return true;
2200
2201 /* Remote side had dedicated bonding as requirement */
2202 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2203 return true;
2204
2205 /* If none of the above criteria match, then don't store the key
2206 * persistently */
2207 return false;
2208}
2209
2210static u8 ltk_role(u8 type)
2211{
2212 if (type == SMP_LTK)
2213 return HCI_ROLE_MASTER;
2214
2215 return HCI_ROLE_SLAVE;
2216}
2217
2218struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2219 u8 addr_type, u8 role)
2220{
2221 struct smp_ltk *k;
2222
2223 rcu_read_lock();
2224 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2225 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2226 continue;
2227
2228 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2229 rcu_read_unlock();
2230 return k;
2231 }
2232 }
2233 rcu_read_unlock();
2234
2235 return NULL;
2236}
2237
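/* Resolve a Resolvable Private Address to a stored IRK. A match on a
 * previously cached RPA is tried first; otherwise each IRK is used to
 * resolve the address and, on success, the RPA is cached for later
 * lookups.
 */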
2238struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2239{
2240 struct smp_irk *irk;
2241
2242 rcu_read_lock();
2243 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2244 if (!bacmp(&irk->rpa, rpa)) {
2245 rcu_read_unlock();
2246 return irk;
2247 }
2248 }
2249
2250 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2251 if (smp_irk_matches(hdev, irk->val, rpa)) {
2252 bacpy(&irk->rpa, rpa);
2253 rcu_read_unlock();
2254 return irk;
2255 }
2256 }
2257 rcu_read_unlock();
2258
2259 return NULL;
2260}
2261
2262struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2263 u8 addr_type)
2264{
2265 struct smp_irk *irk;
2266
2267 /* Identity Address must be public or static random */
2268 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2269 return NULL;
2270
2271 rcu_read_lock();
2272 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2273 if (addr_type == irk->addr_type &&
2274 bacmp(bdaddr, &irk->bdaddr) == 0) {
2275 rcu_read_unlock();
2276 return irk;
2277 }
2278 }
2279 rcu_read_unlock();
2280
2281 return NULL;
2282}
2283
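/* Add or update a BR/EDR link key. When a persistent pointer is
 * passed in, it is set to indicate whether the key should be stored
 * permanently or discarded once the connection is gone.
 */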
2284struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2285 bdaddr_t *bdaddr, u8 *val, u8 type,
2286 u8 pin_len, bool *persistent)
2287{
2288 struct link_key *key, *old_key;
2289 u8 old_key_type;
2290
2291 old_key = hci_find_link_key(hdev, bdaddr);
2292 if (old_key) {
2293 old_key_type = old_key->type;
2294 key = old_key;
2295 } else {
2296 old_key_type = conn ? conn->key_type : 0xff;
2297 key = kzalloc(sizeof(*key), GFP_KERNEL);
2298 if (!key)
2299 return NULL;
2300 list_add_rcu(&key->list, &hdev->link_keys);
2301 }
2302
2303 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2304
2305 /* Some buggy controller combinations generate a changed
2306 * combination key for legacy pairing even when there's no
2307 * previous key */
2308 if (type == HCI_LK_CHANGED_COMBINATION &&
2309 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2310 type = HCI_LK_COMBINATION;
2311 if (conn)
2312 conn->key_type = type;
2313 }
2314
2315 bacpy(&key->bdaddr, bdaddr);
2316 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2317 key->pin_len = pin_len;
2318
2319 if (type == HCI_LK_CHANGED_COMBINATION)
2320 key->type = old_key_type;
2321 else
2322 key->type = type;
2323
2324 if (persistent)
2325 *persistent = hci_persistent_key(hdev, conn, type,
2326 old_key_type);
2327
2328 return key;
2329}
2330
2331struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2332 u8 addr_type, u8 type, u8 authenticated,
2333 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2334{
2335 struct smp_ltk *key, *old_key;
2336 u8 role = ltk_role(type);
2337
2338 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2339 if (old_key)
2340 key = old_key;
2341 else {
2342 key = kzalloc(sizeof(*key), GFP_KERNEL);
2343 if (!key)
2344 return NULL;
2345 list_add_rcu(&key->list, &hdev->long_term_keys);
2346 }
2347
2348 bacpy(&key->bdaddr, bdaddr);
2349 key->bdaddr_type = addr_type;
2350 memcpy(key->val, tk, sizeof(key->val));
2351 key->authenticated = authenticated;
2352 key->ediv = ediv;
2353 key->rand = rand;
2354 key->enc_size = enc_size;
2355 key->type = type;
2356
2357 return key;
2358}
2359
2360struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2361 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2362{
2363 struct smp_irk *irk;
2364
2365 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2366 if (!irk) {
2367 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2368 if (!irk)
2369 return NULL;
2370
2371 bacpy(&irk->bdaddr, bdaddr);
2372 irk->addr_type = addr_type;
2373
2374 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2375 }
2376
2377 memcpy(irk->val, val, 16);
2378 bacpy(&irk->rpa, rpa);
2379
2380 return irk;
2381}
2382
2383int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2384{
2385 struct link_key *key;
2386
2387 key = hci_find_link_key(hdev, bdaddr);
2388 if (!key)
2389 return -ENOENT;
2390
2391 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2392
2393 list_del_rcu(&key->list);
2394 kfree_rcu(key, rcu);
2395
2396 return 0;
2397}
2398
2399int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2400{
2401 struct smp_ltk *k;
2402 int removed = 0;
2403
2404 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2405 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2406 continue;
2407
2408 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2409
2410 list_del_rcu(&k->list);
2411 kfree_rcu(k, rcu);
2412 removed++;
2413 }
2414
2415 return removed ? 0 : -ENOENT;
2416}
2417
2418void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2419{
2420 struct smp_irk *k;
2421
2422 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2423 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2424 continue;
2425
2426 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2427
2428 list_del_rcu(&k->list);
2429 kfree_rcu(k, rcu);
2430 }
2431}
2432
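/* Check whether bonding information exists for the given address:
 * a link key for BR/EDR, or a long term key for LE. For LE the
 * address is first resolved to its identity address via any matching
 * IRK.
 */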
2433bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2434{
2435 struct smp_ltk *k;
2436 struct smp_irk *irk;
2437 u8 addr_type;
2438
2439 if (type == BDADDR_BREDR) {
2440 if (hci_find_link_key(hdev, bdaddr))
2441 return true;
2442 return false;
2443 }
2444
2445 /* Convert to HCI addr type which struct smp_ltk uses */
2446 if (type == BDADDR_LE_PUBLIC)
2447 addr_type = ADDR_LE_DEV_PUBLIC;
2448 else
2449 addr_type = ADDR_LE_DEV_RANDOM;
2450
2451 irk = hci_get_irk(hdev, bdaddr, addr_type);
2452 if (irk) {
2453 bdaddr = &irk->bdaddr;
2454 addr_type = irk->addr_type;
2455 }
2456
2457 rcu_read_lock();
2458 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2459 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2460 rcu_read_unlock();
2461 return true;
2462 }
2463 }
2464 rcu_read_unlock();
2465
2466 return false;
2467}
2468
2469/* HCI command timer function */
2470static void hci_cmd_timeout(struct work_struct *work)
2471{
2472 struct hci_dev *hdev = container_of(work, struct hci_dev,
2473 cmd_timer.work);
2474
2475 if (hdev->sent_cmd) {
2476 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2477 u16 opcode = __le16_to_cpu(sent->opcode);
2478
2479 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2480 } else {
2481 BT_ERR("%s command tx timeout", hdev->name);
2482 }
2483
2484 atomic_set(&hdev->cmd_cnt, 1);
2485 queue_work(hdev->workqueue, &hdev->cmd_work);
2486}
2487
2488struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2489 bdaddr_t *bdaddr, u8 bdaddr_type)
2490{
2491 struct oob_data *data;
2492
2493 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2494 if (bacmp(bdaddr, &data->bdaddr) != 0)
2495 continue;
2496 if (data->bdaddr_type != bdaddr_type)
2497 continue;
2498 return data;
2499 }
2500
2501 return NULL;
2502}
2503
2504int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2505 u8 bdaddr_type)
2506{
2507 struct oob_data *data;
2508
2509 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2510 if (!data)
2511 return -ENOENT;
2512
2513 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2514
2515 list_del(&data->list);
2516 kfree(data);
2517
2518 return 0;
2519}
2520
2521void hci_remote_oob_data_clear(struct hci_dev *hdev)
2522{
2523 struct oob_data *data, *n;
2524
2525 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2526 list_del(&data->list);
2527 kfree(data);
2528 }
2529}
2530
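/* Store out-of-band pairing data received for a remote device. The
 * present field tracks which values are valid: 0x01 for P-192 only,
 * 0x02 for P-256 only, 0x03 for both and 0x00 for neither.
 */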
2531int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2532 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2533 u8 *hash256, u8 *rand256)
2534{
2535 struct oob_data *data;
2536
2537 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2538 if (!data) {
2539 data = kmalloc(sizeof(*data), GFP_KERNEL);
2540 if (!data)
2541 return -ENOMEM;
2542
2543 bacpy(&data->bdaddr, bdaddr);
2544 data->bdaddr_type = bdaddr_type;
2545 list_add(&data->list, &hdev->remote_oob_data);
2546 }
2547
2548 if (hash192 && rand192) {
2549 memcpy(data->hash192, hash192, sizeof(data->hash192));
2550 memcpy(data->rand192, rand192, sizeof(data->rand192));
2551 if (hash256 && rand256)
2552 data->present = 0x03;
2553 } else {
2554 memset(data->hash192, 0, sizeof(data->hash192));
2555 memset(data->rand192, 0, sizeof(data->rand192));
2556 if (hash256 && rand256)
2557 data->present = 0x02;
2558 else
2559 data->present = 0x00;
2560 }
2561
2562 if (hash256 && rand256) {
2563 memcpy(data->hash256, hash256, sizeof(data->hash256));
2564 memcpy(data->rand256, rand256, sizeof(data->rand256));
2565 } else {
2566 memset(data->hash256, 0, sizeof(data->hash256));
2567 memset(data->rand256, 0, sizeof(data->rand256));
2568 if (hash192 && rand192)
2569 data->present = 0x01;
2570 }
2571
2572 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2573
2574 return 0;
2575}
2576
2577/* This function requires the caller holds hdev->lock */
2578struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2579{
2580 struct adv_info *adv_instance;
2581
2582 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2583 if (adv_instance->instance == instance)
2584 return adv_instance;
2585 }
2586
2587 return NULL;
2588}
2589
2590/* This function requires the caller holds hdev->lock */
2591struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2592{
2593 struct adv_info *cur_instance;
2594
2595 cur_instance = hci_find_adv_instance(hdev, instance);
2596 if (!cur_instance)
2597 return NULL;
2598
2599 if (cur_instance == list_last_entry(&hdev->adv_instances,
2600 struct adv_info, list))
2601 return list_first_entry(&hdev->adv_instances,
2602 struct adv_info, list);
2603 else
2604 return list_next_entry(cur_instance, list);
2605}
2606
2607/* This function requires the caller holds hdev->lock */
2608int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2609{
2610 struct adv_info *adv_instance;
2611
2612 adv_instance = hci_find_adv_instance(hdev, instance);
2613 if (!adv_instance)
2614 return -ENOENT;
2615
2616	BT_DBG("%s removing %d", hdev->name, instance);
2617
2618 if (hdev->cur_adv_instance == instance) {
2619 if (hdev->adv_instance_timeout) {
2620 cancel_delayed_work(&hdev->adv_instance_expire);
2621 hdev->adv_instance_timeout = 0;
2622 }
2623 hdev->cur_adv_instance = 0x00;
2624 }
2625
2626 list_del(&adv_instance->list);
2627 kfree(adv_instance);
2628
2629 hdev->adv_instance_cnt--;
2630
2631 return 0;
2632}
2633
2634/* This function requires the caller holds hdev->lock */
2635void hci_adv_instances_clear(struct hci_dev *hdev)
2636{
2637 struct adv_info *adv_instance, *n;
2638
2639 if (hdev->adv_instance_timeout) {
2640 cancel_delayed_work(&hdev->adv_instance_expire);
2641 hdev->adv_instance_timeout = 0;
2642 }
2643
2644 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2645 list_del(&adv_instance->list);
2646 kfree(adv_instance);
2647 }
2648
2649 hdev->adv_instance_cnt = 0;
2650 hdev->cur_adv_instance = 0x00;
2651}
2652
2653/* This function requires the caller holds hdev->lock */
2654int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2655 u16 adv_data_len, u8 *adv_data,
2656 u16 scan_rsp_len, u8 *scan_rsp_data,
2657 u16 timeout, u16 duration)
2658{
2659 struct adv_info *adv_instance;
2660
2661 adv_instance = hci_find_adv_instance(hdev, instance);
2662 if (adv_instance) {
2663 memset(adv_instance->adv_data, 0,
2664 sizeof(adv_instance->adv_data));
2665 memset(adv_instance->scan_rsp_data, 0,
2666 sizeof(adv_instance->scan_rsp_data));
2667 } else {
2668 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2669 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2670 return -EOVERFLOW;
2671
2672 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2673 if (!adv_instance)
2674 return -ENOMEM;
2675
2676 adv_instance->pending = true;
2677 adv_instance->instance = instance;
2678 list_add(&adv_instance->list, &hdev->adv_instances);
2679 hdev->adv_instance_cnt++;
2680 }
2681
2682 adv_instance->flags = flags;
2683 adv_instance->adv_data_len = adv_data_len;
2684 adv_instance->scan_rsp_len = scan_rsp_len;
2685
2686 if (adv_data_len)
2687 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2688
2689 if (scan_rsp_len)
2690 memcpy(adv_instance->scan_rsp_data,
2691 scan_rsp_data, scan_rsp_len);
2692
2693 adv_instance->timeout = timeout;
2694 adv_instance->remaining_time = timeout;
2695
2696 if (duration == 0)
2697 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2698 else
2699 adv_instance->duration = duration;
2700
2701	BT_DBG("%s for %d", hdev->name, instance);
2702
2703 return 0;
2704}
2705
2706struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2707 bdaddr_t *bdaddr, u8 type)
2708{
2709 struct bdaddr_list *b;
2710
2711 list_for_each_entry(b, bdaddr_list, list) {
2712 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2713 return b;
2714 }
2715
2716 return NULL;
2717}
2718
2719void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2720{
2721 struct bdaddr_list *b, *n;
2722
2723 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2724 list_del(&b->list);
2725 kfree(b);
2726 }
2727}
2728
2729int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2730{
2731 struct bdaddr_list *entry;
2732
2733 if (!bacmp(bdaddr, BDADDR_ANY))
2734 return -EBADF;
2735
2736 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2737 return -EEXIST;
2738
2739 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2740 if (!entry)
2741 return -ENOMEM;
2742
2743 bacpy(&entry->bdaddr, bdaddr);
2744 entry->bdaddr_type = type;
2745
2746 list_add(&entry->list, list);
2747
2748 return 0;
2749}
2750
2751int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2752{
2753 struct bdaddr_list *entry;
2754
2755 if (!bacmp(bdaddr, BDADDR_ANY)) {
2756 hci_bdaddr_list_clear(list);
2757 return 0;
2758 }
2759
2760 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2761 if (!entry)
2762 return -ENOENT;
2763
2764 list_del(&entry->list);
2765 kfree(entry);
2766
2767 return 0;
2768}
2769
2770/* This function requires the caller holds hdev->lock */
2771struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2772 bdaddr_t *addr, u8 addr_type)
2773{
2774 struct hci_conn_params *params;
2775
2776 list_for_each_entry(params, &hdev->le_conn_params, list) {
2777		if (bacmp(&params->addr, addr) == 0 &&
2778 params->addr_type == addr_type) {
2779 return params;
2780 }
2781 }
2782
2783 return NULL;
2784}
2785
2786/* This function requires the caller holds hdev->lock */
2787struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2788 bdaddr_t *addr, u8 addr_type)
2789{
2790 struct hci_conn_params *param;
2791
2792 list_for_each_entry(param, list, action) {
2793		if (bacmp(&param->addr, addr) == 0 &&
2794 param->addr_type == addr_type)
2795 return param;
2796 }
2797
2798 return NULL;
2799}
2800
2801/* This function requires the caller holds hdev->lock */
2802struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2803 bdaddr_t *addr, u8 addr_type)
2804{
2805 struct hci_conn_params *params;
2806
2807 params = hci_conn_params_lookup(hdev, addr, addr_type);
2808 if (params)
2809 return params;
2810
2811 params = kzalloc(sizeof(*params), GFP_KERNEL);
2812 if (!params) {
2813 BT_ERR("Out of memory");
2814 return NULL;
2815 }
2816
2817	bacpy(&params->addr, addr);
2818 params->addr_type = addr_type;
2819
2820	list_add(&params->list, &hdev->le_conn_params);
2821	INIT_LIST_HEAD(&params->action);
2822
2823 params->conn_min_interval = hdev->le_conn_min_interval;
2824 params->conn_max_interval = hdev->le_conn_max_interval;
2825 params->conn_latency = hdev->le_conn_latency;
2826 params->supervision_timeout = hdev->le_supv_timeout;
2827 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2828
2829 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2830
2831 return params;
2832}
2833
2834static void hci_conn_params_free(struct hci_conn_params *params)
2835{
2836 if (params->conn) {
2837 hci_conn_drop(params->conn);
2838 hci_conn_put(params->conn);
2839 }
2840
2841	list_del(&params->action);
2842	list_del(&params->list);
2843 kfree(params);
2844}
2845
2846/* This function requires the caller holds hdev->lock */
2847void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2848{
2849 struct hci_conn_params *params;
2850
2851 params = hci_conn_params_lookup(hdev, addr, addr_type);
2852 if (!params)
2853 return;
2854
2855 hci_conn_params_free(params);
2856
2857 hci_update_background_scan(hdev);
2858
2859 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2860}
2861
2862/* This function requires the caller holds hdev->lock */
2863void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2864{
2865 struct hci_conn_params *params, *tmp;
2866
2867 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2868 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2869 continue;
2870
2871		/* If trying to establish a one-time connection to a disabled
2872		 * device, leave the params, but mark them as one-time only.
2873 */
2874 if (params->explicit_connect) {
2875 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2876 continue;
2877 }
2878
2879		list_del(&params->list);
2880 kfree(params);
2881 }
2882
2883 BT_DBG("All LE disabled connection parameters were removed");
2884}
2885
2886/* This function requires the caller holds hdev->lock */
2887static void hci_conn_params_clear_all(struct hci_dev *hdev)
2888{
2889 struct hci_conn_params *params, *tmp;
2890
2891 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2892 hci_conn_params_free(params);
2893
2894 BT_DBG("All LE connection parameters were removed");
2895}
2896
2897/* Copy the Identity Address of the controller.
2898 *
2899 * If the controller has a public BD_ADDR, then by default use that one.
2900 * If this is a LE only controller without a public address, default to
2901 * the static random address.
2902 *
2903 * For debugging purposes it is possible to force controllers with a
2904 * public address to use the static random address instead.
2905 *
2906 * In case BR/EDR has been disabled on a dual-mode controller and
2907 * userspace has configured a static address, then that address
2908 * becomes the identity address instead of the public BR/EDR address.
2909 */
2910void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2911 u8 *bdaddr_type)
2912{
2913 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2914 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2915 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2916 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2917 bacpy(bdaddr, &hdev->static_addr);
2918 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2919 } else {
2920 bacpy(bdaddr, &hdev->bdaddr);
2921 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2922 }
2923}
2924
2925/* Alloc HCI device */
2926struct hci_dev *hci_alloc_dev(void)
2927{
2928 struct hci_dev *hdev;
2929
2930 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2931 if (!hdev)
2932 return NULL;
2933
2934 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2935 hdev->esco_type = (ESCO_HV1);
2936 hdev->link_mode = (HCI_LM_ACCEPT);
2937 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2938 hdev->io_capability = 0x03; /* No Input No Output */
2939 hdev->manufacturer = 0xffff; /* Default to internal use */
2940 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2941 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2942 hdev->adv_instance_cnt = 0;
2943 hdev->cur_adv_instance = 0x00;
2944 hdev->adv_instance_timeout = 0;
2945
2946 hdev->sniff_max_interval = 800;
2947 hdev->sniff_min_interval = 80;
2948
2949 hdev->le_adv_channel_map = 0x07;
2950 hdev->le_adv_min_interval = 0x0800;
2951 hdev->le_adv_max_interval = 0x0800;
2952 hdev->le_scan_interval = 0x0060;
2953 hdev->le_scan_window = 0x0030;
2954 hdev->le_conn_min_interval = 0x0028;
2955 hdev->le_conn_max_interval = 0x0038;
2956 hdev->le_conn_latency = 0x0000;
2957 hdev->le_supv_timeout = 0x002a;
2958 hdev->le_def_tx_len = 0x001b;
2959 hdev->le_def_tx_time = 0x0148;
2960 hdev->le_max_tx_len = 0x001b;
2961 hdev->le_max_tx_time = 0x0148;
2962 hdev->le_max_rx_len = 0x001b;
2963 hdev->le_max_rx_time = 0x0148;
2964
2965 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2966 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2967 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2968 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2969
2970 mutex_init(&hdev->lock);
2971 mutex_init(&hdev->req_lock);
2972
2973 INIT_LIST_HEAD(&hdev->mgmt_pending);
2974 INIT_LIST_HEAD(&hdev->blacklist);
2975 INIT_LIST_HEAD(&hdev->whitelist);
2976 INIT_LIST_HEAD(&hdev->uuids);
2977 INIT_LIST_HEAD(&hdev->link_keys);
2978 INIT_LIST_HEAD(&hdev->long_term_keys);
2979 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2980 INIT_LIST_HEAD(&hdev->remote_oob_data);
2981 INIT_LIST_HEAD(&hdev->le_white_list);
2982 INIT_LIST_HEAD(&hdev->le_conn_params);
2983 INIT_LIST_HEAD(&hdev->pend_le_conns);
2984 INIT_LIST_HEAD(&hdev->pend_le_reports);
2985 INIT_LIST_HEAD(&hdev->conn_hash.list);
2986 INIT_LIST_HEAD(&hdev->adv_instances);
2987
2988 INIT_WORK(&hdev->rx_work, hci_rx_work);
2989 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2990 INIT_WORK(&hdev->tx_work, hci_tx_work);
2991 INIT_WORK(&hdev->power_on, hci_power_on);
2992 INIT_WORK(&hdev->error_reset, hci_error_reset);
2993
2994 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2995
2996 skb_queue_head_init(&hdev->rx_q);
2997 skb_queue_head_init(&hdev->cmd_q);
2998 skb_queue_head_init(&hdev->raw_q);
2999
3000 init_waitqueue_head(&hdev->req_wait_q);
3001
3002 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3003
3004 hci_request_setup(hdev);
3005
3006 hci_init_sysfs(hdev);
3007 discovery_init(hdev);
3008
3009 return hdev;
3010}
3011EXPORT_SYMBOL(hci_alloc_dev);
3012
3013/* Free HCI device */
3014void hci_free_dev(struct hci_dev *hdev)
3015{
3016 /* will free via device release */
3017 put_device(&hdev->dev);
3018}
3019EXPORT_SYMBOL(hci_free_dev);
3020
3021/* Register HCI device */
3022int hci_register_dev(struct hci_dev *hdev)
3023{
3024 int id, error;
3025
3026 if (!hdev->open || !hdev->close || !hdev->send)
3027 return -EINVAL;
3028
3029 /* Do not allow HCI_AMP devices to register at index 0,
3030 * so the index can be used as the AMP controller ID.
3031 */
3032 switch (hdev->dev_type) {
3033 case HCI_BREDR:
3034 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3035 break;
3036 case HCI_AMP:
3037 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3038 break;
3039 default:
3040 return -EINVAL;
3041 }
3042
3043 if (id < 0)
3044 return id;
3045
3046 sprintf(hdev->name, "hci%d", id);
3047 hdev->id = id;
3048
3049 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3050
3051 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3052 WQ_MEM_RECLAIM, 1, hdev->name);
3053 if (!hdev->workqueue) {
3054 error = -ENOMEM;
3055 goto err;
3056 }
3057
3058 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3059 WQ_MEM_RECLAIM, 1, hdev->name);
3060 if (!hdev->req_workqueue) {
3061 destroy_workqueue(hdev->workqueue);
3062 error = -ENOMEM;
3063 goto err;
3064 }
3065
3066 if (!IS_ERR_OR_NULL(bt_debugfs))
3067 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3068
3069 dev_set_name(&hdev->dev, "%s", hdev->name);
3070
3071 error = device_add(&hdev->dev);
3072 if (error < 0)
3073 goto err_wqueue;
3074
3075 hci_leds_init(hdev);
3076
3077 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3078 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3079 hdev);
3080 if (hdev->rfkill) {
3081 if (rfkill_register(hdev->rfkill) < 0) {
3082 rfkill_destroy(hdev->rfkill);
3083 hdev->rfkill = NULL;
3084 }
3085 }
3086
3087 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3088 hci_dev_set_flag(hdev, HCI_RFKILLED);
3089
3090 hci_dev_set_flag(hdev, HCI_SETUP);
3091 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3092
3093 if (hdev->dev_type == HCI_BREDR) {
3094 /* Assume BR/EDR support until proven otherwise (such as
3095		 * through reading supported features during init).
3096 */
3097 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3098 }
3099
3100 write_lock(&hci_dev_list_lock);
3101 list_add(&hdev->list, &hci_dev_list);
3102 write_unlock(&hci_dev_list_lock);
3103
3104 /* Devices that are marked for raw-only usage are unconfigured
3105 * and should not be included in normal operation.
3106 */
3107 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3108 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3109
3110 hci_sock_dev_event(hdev, HCI_DEV_REG);
3111 hci_dev_hold(hdev);
3112
3113 queue_work(hdev->req_workqueue, &hdev->power_on);
3114
3115 return id;
3116
3117err_wqueue:
3118 destroy_workqueue(hdev->workqueue);
3119 destroy_workqueue(hdev->req_workqueue);
3120err:
3121 ida_simple_remove(&hci_index_ida, hdev->id);
3122
3123 return error;
3124}
3125EXPORT_SYMBOL(hci_register_dev);
3126
3127/* Unregister HCI device */
3128void hci_unregister_dev(struct hci_dev *hdev)
3129{
3130 int id;
3131
3132 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3133
3134 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3135
3136 id = hdev->id;
3137
3138 write_lock(&hci_dev_list_lock);
3139 list_del(&hdev->list);
3140 write_unlock(&hci_dev_list_lock);
3141
3142 hci_dev_do_close(hdev);
3143
3144 cancel_work_sync(&hdev->power_on);
3145
3146 if (!test_bit(HCI_INIT, &hdev->flags) &&
3147 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3148 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3149 hci_dev_lock(hdev);
3150 mgmt_index_removed(hdev);
3151 hci_dev_unlock(hdev);
3152 }
3153
3154 /* mgmt_index_removed should take care of emptying the
3155 * pending list */
3156 BUG_ON(!list_empty(&hdev->mgmt_pending));
3157
3158 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3159
3160 if (hdev->rfkill) {
3161 rfkill_unregister(hdev->rfkill);
3162 rfkill_destroy(hdev->rfkill);
3163 }
3164
3165 device_del(&hdev->dev);
3166
3167 debugfs_remove_recursive(hdev->debugfs);
3168
3169 destroy_workqueue(hdev->workqueue);
3170 destroy_workqueue(hdev->req_workqueue);
3171
3172 hci_dev_lock(hdev);
3173 hci_bdaddr_list_clear(&hdev->blacklist);
3174 hci_bdaddr_list_clear(&hdev->whitelist);
3175 hci_uuids_clear(hdev);
3176 hci_link_keys_clear(hdev);
3177 hci_smp_ltks_clear(hdev);
3178 hci_smp_irks_clear(hdev);
3179 hci_remote_oob_data_clear(hdev);
3180 hci_adv_instances_clear(hdev);
3181 hci_bdaddr_list_clear(&hdev->le_white_list);
3182 hci_conn_params_clear_all(hdev);
3183 hci_discovery_filter_clear(hdev);
3184 hci_dev_unlock(hdev);
3185
3186 hci_dev_put(hdev);
3187
3188 ida_simple_remove(&hci_index_ida, id);
3189}
3190EXPORT_SYMBOL(hci_unregister_dev);
3191
3192/* Suspend HCI device */
3193int hci_suspend_dev(struct hci_dev *hdev)
3194{
3195 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3196 return 0;
3197}
3198EXPORT_SYMBOL(hci_suspend_dev);
3199
3200/* Resume HCI device */
3201int hci_resume_dev(struct hci_dev *hdev)
3202{
3203 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3204 return 0;
3205}
3206EXPORT_SYMBOL(hci_resume_dev);
3207
3208/* Reset HCI device */
3209int hci_reset_dev(struct hci_dev *hdev)
3210{
3211 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3212 struct sk_buff *skb;
3213
3214 skb = bt_skb_alloc(3, GFP_ATOMIC);
3215 if (!skb)
3216 return -ENOMEM;
3217
3218 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3219 memcpy(skb_put(skb, 3), hw_err, 3);
3220
3221 /* Send Hardware Error to upper stack */
3222 return hci_recv_frame(hdev, skb);
3223}
3224EXPORT_SYMBOL(hci_reset_dev);
3225
3226/* Receive frame from HCI drivers */
3227int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3228{
3229 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3230 && !test_bit(HCI_INIT, &hdev->flags))) {
3231 kfree_skb(skb);
3232 return -ENXIO;
3233 }
3234
3235 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3236 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3237 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3238 kfree_skb(skb);
3239 return -EINVAL;
3240 }
3241
3242 /* Incoming skb */
3243 bt_cb(skb)->incoming = 1;
3244
3245 /* Time stamp */
3246 __net_timestamp(skb);
3247
3248 skb_queue_tail(&hdev->rx_q, skb);
3249 queue_work(hdev->workqueue, &hdev->rx_work);
3250
3251 return 0;
3252}
3253EXPORT_SYMBOL(hci_recv_frame);
3254
3255/* Receive diagnostic message from HCI drivers */
3256int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3257{
3258 /* Mark as diagnostic packet */
3259 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3260
3261 /* Time stamp */
3262 __net_timestamp(skb);
3263
3264 skb_queue_tail(&hdev->rx_q, skb);
3265 queue_work(hdev->workqueue, &hdev->rx_work);
3266
3267 return 0;
3268}
3269EXPORT_SYMBOL(hci_recv_diag);
3270
3271/* ---- Interface to upper protocols ---- */
3272
3273int hci_register_cb(struct hci_cb *cb)
3274{
3275 BT_DBG("%p name %s", cb, cb->name);
3276
3277 mutex_lock(&hci_cb_list_lock);
3278 list_add_tail(&cb->list, &hci_cb_list);
3279 mutex_unlock(&hci_cb_list_lock);
3280
3281 return 0;
3282}
3283EXPORT_SYMBOL(hci_register_cb);
3284
3285int hci_unregister_cb(struct hci_cb *cb)
3286{
3287 BT_DBG("%p name %s", cb, cb->name);
3288
3289 mutex_lock(&hci_cb_list_lock);
3290 list_del(&cb->list);
3291 mutex_unlock(&hci_cb_list_lock);
3292
3293 return 0;
3294}
3295EXPORT_SYMBOL(hci_unregister_cb);
3296
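/* Hand a fully built packet to the driver. The frame is timestamped,
 * a copy is sent to the monitor socket (and to raw sockets when in
 * promiscuous mode) and the skb is dropped if the driver is not
 * running.
 */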
3297static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3298{
3299 int err;
3300
3301 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3302 skb->len);
3303
3304 /* Time stamp */
3305 __net_timestamp(skb);
3306
3307 /* Send copy to monitor */
3308 hci_send_to_monitor(hdev, skb);
3309
3310 if (atomic_read(&hdev->promisc)) {
3311 /* Send copy to the sockets */
3312 hci_send_to_sock(hdev, skb);
3313 }
3314
3315 /* Get rid of skb owner, prior to sending to the driver. */
3316 skb_orphan(skb);
3317
3318 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3319 kfree_skb(skb);
3320 return;
3321 }
3322
3323 err = hdev->send(hdev, skb);
3324 if (err < 0) {
3325 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3326 kfree_skb(skb);
3327 }
3328}
3329
3330/* Send HCI command */
3331int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3332 const void *param)
3333{
3334 struct sk_buff *skb;
3335
3336 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3337
3338 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3339 if (!skb) {
3340 BT_ERR("%s no memory for command", hdev->name);
3341 return -ENOMEM;
3342 }
3343
3344 /* Stand-alone HCI commands must be flagged as
3345 * single-command requests.
3346 */
3347 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3348
3349 skb_queue_tail(&hdev->cmd_q, skb);
3350 queue_work(hdev->workqueue, &hdev->cmd_work);
3351
3352 return 0;
3353}
3354
3355/* Get data from the previously sent command */
3356void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3357{
3358 struct hci_command_hdr *hdr;
3359
3360 if (!hdev->sent_cmd)
3361 return NULL;
3362
3363 hdr = (void *) hdev->sent_cmd->data;
3364
3365 if (hdr->opcode != cpu_to_le16(opcode))
3366 return NULL;
3367
3368 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3369
3370 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3371}
3372
3373/* Send HCI command and wait for command complete event */
3374struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3375 const void *param, u32 timeout)
3376{
3377 struct sk_buff *skb;
3378
3379 if (!test_bit(HCI_UP, &hdev->flags))
3380 return ERR_PTR(-ENETDOWN);
3381
3382 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3383
3384 hci_req_sync_lock(hdev);
3385 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3386 hci_req_sync_unlock(hdev);
3387
3388 return skb;
3389}
3390EXPORT_SYMBOL(hci_cmd_sync);
3391
3392/* Send ACL data */
3393static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3394{
3395 struct hci_acl_hdr *hdr;
3396 int len = skb->len;
3397
3398 skb_push(skb, HCI_ACL_HDR_SIZE);
3399 skb_reset_transport_header(skb);
3400 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3401 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3402 hdr->dlen = cpu_to_le16(len);
3403}
3404
3405static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3406 struct sk_buff *skb, __u16 flags)
3407{
3408 struct hci_conn *conn = chan->conn;
3409 struct hci_dev *hdev = conn->hdev;
3410 struct sk_buff *list;
3411
3412 skb->len = skb_headlen(skb);
3413 skb->data_len = 0;
3414
3415 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3416
3417 switch (hdev->dev_type) {
3418 case HCI_BREDR:
3419 hci_add_acl_hdr(skb, conn->handle, flags);
3420 break;
3421 case HCI_AMP:
3422 hci_add_acl_hdr(skb, chan->handle, flags);
3423 break;
3424 default:
3425 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3426 return;
3427 }
3428
3429 list = skb_shinfo(skb)->frag_list;
3430 if (!list) {
3431 /* Non fragmented */
3432 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3433
3434 skb_queue_tail(queue, skb);
3435 } else {
3436 /* Fragmented */
3437 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3438
3439 skb_shinfo(skb)->frag_list = NULL;
3440
3441 /* Queue all fragments atomically. We need to use spin_lock_bh
3442 * here because of 6LoWPAN links, as there this function is
3443 * called from softirq and using normal spin lock could cause
3444 * deadlocks.
3445 */
3446 spin_lock_bh(&queue->lock);
3447
3448 __skb_queue_tail(queue, skb);
3449
3450 flags &= ~ACL_START;
3451 flags |= ACL_CONT;
3452 do {
3453 skb = list; list = list->next;
3454
3455 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3456 hci_add_acl_hdr(skb, conn->handle, flags);
3457
3458 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3459
3460 __skb_queue_tail(queue, skb);
3461 } while (list);
3462
3463 spin_unlock_bh(&queue->lock);
3464 }
3465}
3466
3467void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3468{
3469 struct hci_dev *hdev = chan->conn->hdev;
3470
3471 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3472
3473 hci_queue_acl(chan, &chan->data_q, skb, flags);
3474
3475 queue_work(hdev->workqueue, &hdev->tx_work);
3476}
3477
3478/* Send SCO data */
3479void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3480{
3481 struct hci_dev *hdev = conn->hdev;
3482 struct hci_sco_hdr hdr;
3483
3484 BT_DBG("%s len %d", hdev->name, skb->len);
3485
3486 hdr.handle = cpu_to_le16(conn->handle);
3487 hdr.dlen = skb->len;
3488
3489 skb_push(skb, HCI_SCO_HDR_SIZE);
3490 skb_reset_transport_header(skb);
3491 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3492
3493 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3494
3495 skb_queue_tail(&conn->data_q, skb);
3496 queue_work(hdev->workqueue, &hdev->tx_work);
3497}
3498
3499/* ---- HCI TX task (outgoing data) ---- */
3500
3501/* HCI Connection scheduler */
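/* Pick the connection of the given link type with the fewest packets
 * in flight and compute a fair quota of packets it may send this
 * round.
 */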
3502static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3503 int *quote)
3504{
3505 struct hci_conn_hash *h = &hdev->conn_hash;
3506 struct hci_conn *conn = NULL, *c;
3507 unsigned int num = 0, min = ~0;
3508
3509 /* We don't have to lock device here. Connections are always
3510 * added and removed with TX task disabled. */
3511
3512 rcu_read_lock();
3513
3514 list_for_each_entry_rcu(c, &h->list, list) {
3515 if (c->type != type || skb_queue_empty(&c->data_q))
3516 continue;
3517
3518 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3519 continue;
3520
3521 num++;
3522
3523 if (c->sent < min) {
3524 min = c->sent;
3525 conn = c;
3526 }
3527
3528 if (hci_conn_num(hdev, type) == num)
3529 break;
3530 }
3531
3532 rcu_read_unlock();
3533
3534 if (conn) {
3535 int cnt, q;
3536
3537 switch (conn->type) {
3538 case ACL_LINK:
3539 cnt = hdev->acl_cnt;
3540 break;
3541 case SCO_LINK:
3542 case ESCO_LINK:
3543 cnt = hdev->sco_cnt;
3544 break;
3545 case LE_LINK:
3546 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3547 break;
3548 default:
3549 cnt = 0;
3550 BT_ERR("Unknown link type");
3551 }
3552
3553 q = cnt / num;
3554 *quote = q ? q : 1;
3555 } else
3556 *quote = 0;
3557
3558 BT_DBG("conn %p quote %d", conn, *quote);
3559 return conn;
3560}
3561
3562static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3563{
3564 struct hci_conn_hash *h = &hdev->conn_hash;
3565 struct hci_conn *c;
3566
3567 BT_ERR("%s link tx timeout", hdev->name);
3568
3569 rcu_read_lock();
3570
3571 /* Kill stalled connections */
3572 list_for_each_entry_rcu(c, &h->list, list) {
3573 if (c->type == type && c->sent) {
3574 BT_ERR("%s killing stalled connection %pMR",
3575 hdev->name, &c->dst);
3576 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3577 }
3578 }
3579
3580 rcu_read_unlock();
3581}
3582
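/* Channel scheduler: among all channels of the given link type with
 * queued data, pick one at the highest pending priority belonging to
 * the connection with the fewest packets in flight, and compute its
 * send quota.
 */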
3583static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3584 int *quote)
3585{
3586 struct hci_conn_hash *h = &hdev->conn_hash;
3587 struct hci_chan *chan = NULL;
3588 unsigned int num = 0, min = ~0, cur_prio = 0;
3589 struct hci_conn *conn;
3590 int cnt, q, conn_num = 0;
3591
3592 BT_DBG("%s", hdev->name);
3593
3594 rcu_read_lock();
3595
3596 list_for_each_entry_rcu(conn, &h->list, list) {
3597 struct hci_chan *tmp;
3598
3599 if (conn->type != type)
3600 continue;
3601
3602 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3603 continue;
3604
3605 conn_num++;
3606
3607 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3608 struct sk_buff *skb;
3609
3610 if (skb_queue_empty(&tmp->data_q))
3611 continue;
3612
3613 skb = skb_peek(&tmp->data_q);
3614 if (skb->priority < cur_prio)
3615 continue;
3616
3617 if (skb->priority > cur_prio) {
3618 num = 0;
3619 min = ~0;
3620 cur_prio = skb->priority;
3621 }
3622
3623 num++;
3624
3625 if (conn->sent < min) {
3626 min = conn->sent;
3627 chan = tmp;
3628 }
3629 }
3630
3631 if (hci_conn_num(hdev, type) == conn_num)
3632 break;
3633 }
3634
3635 rcu_read_unlock();
3636
3637 if (!chan)
3638 return NULL;
3639
3640 switch (chan->conn->type) {
3641 case ACL_LINK:
3642 cnt = hdev->acl_cnt;
3643 break;
3644 case AMP_LINK:
3645 cnt = hdev->block_cnt;
3646 break;
3647 case SCO_LINK:
3648 case ESCO_LINK:
3649 cnt = hdev->sco_cnt;
3650 break;
3651 case LE_LINK:
3652 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3653 break;
3654 default:
3655 cnt = 0;
3656 BT_ERR("Unknown link type");
3657 }
3658
3659 q = cnt / num;
3660 *quote = q ? q : 1;
3661 BT_DBG("chan %p quote %d", chan, *quote);
3662 return chan;
3663}
3664
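/* After a scheduling round, bump the priority of channels that did not
 * get to send anything so that they are not starved by higher priority
 * traffic forever.
 */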
3665static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3666{
3667 struct hci_conn_hash *h = &hdev->conn_hash;
3668 struct hci_conn *conn;
3669 int num = 0;
3670
3671 BT_DBG("%s", hdev->name);
3672
3673 rcu_read_lock();
3674
3675 list_for_each_entry_rcu(conn, &h->list, list) {
3676 struct hci_chan *chan;
3677
3678 if (conn->type != type)
3679 continue;
3680
3681 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3682 continue;
3683
3684 num++;
3685
3686 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3687 struct sk_buff *skb;
3688
3689 if (chan->sent) {
3690 chan->sent = 0;
3691 continue;
3692 }
3693
3694 if (skb_queue_empty(&chan->data_q))
3695 continue;
3696
3697 skb = skb_peek(&chan->data_q);
3698 if (skb->priority >= HCI_PRIO_MAX - 1)
3699 continue;
3700
3701 skb->priority = HCI_PRIO_MAX - 1;
3702
3703 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3704 skb->priority);
3705 }
3706
3707 if (hci_conn_num(hdev, type) == num)
3708 break;
3709 }
3710
3711 rcu_read_unlock();
3712
3713}
3714
3715static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3716{
3717 /* Calculate count of blocks used by this packet */
3718 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3719}
3720
3721static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3722{
3723 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3724 /* ACL tx timeout must be longer than maximum
3725 * link supervision timeout (40.9 seconds) */
3726 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3727 HCI_ACL_TX_TIMEOUT))
3728 hci_link_tx_to(hdev, ACL_LINK);
3729 }
3730}
3731
3732static void hci_sched_acl_pkt(struct hci_dev *hdev)
3733{
3734 unsigned int cnt = hdev->acl_cnt;
3735 struct hci_chan *chan;
3736 struct sk_buff *skb;
3737 int quote;
3738
3739 __check_timeout(hdev, cnt);
3740
3741 while (hdev->acl_cnt &&
3742	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3743 u32 priority = (skb_peek(&chan->data_q))->priority;
3744 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3745 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3746 skb->len, skb->priority);
3747
3748 /* Stop if priority has changed */
3749 if (skb->priority < priority)
3750 break;
3751
3752 skb = skb_dequeue(&chan->data_q);
3753
3754 hci_conn_enter_active_mode(chan->conn,
3755 bt_cb(skb)->force_active);
3756
3757 hci_send_frame(hdev, skb);
3758 hdev->acl_last_tx = jiffies;
3759
3760 hdev->acl_cnt--;
3761 chan->sent++;
3762 chan->conn->sent++;
3763 }
3764 }
3765
3766 if (cnt != hdev->acl_cnt)
3767 hci_prio_recalculate(hdev, ACL_LINK);
3768}
3769
3770static void hci_sched_acl_blk(struct hci_dev *hdev)
3771{
3772 unsigned int cnt = hdev->block_cnt;
3773 struct hci_chan *chan;
3774 struct sk_buff *skb;
3775 int quote;
3776 u8 type;
3777
3778 __check_timeout(hdev, cnt);
3779
3780 BT_DBG("%s", hdev->name);
3781
3782 if (hdev->dev_type == HCI_AMP)
3783 type = AMP_LINK;
3784 else
3785 type = ACL_LINK;
3786
3787 while (hdev->block_cnt > 0 &&
3788	       (chan = hci_chan_sent(hdev, type, &quote))) {
3789 u32 priority = (skb_peek(&chan->data_q))->priority;
3790 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3791 int blocks;
3792
3793 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3794 skb->len, skb->priority);
3795
3796 /* Stop if priority has changed */
3797 if (skb->priority < priority)
3798 break;
3799
3800 skb = skb_dequeue(&chan->data_q);
3801
3802 blocks = __get_blocks(hdev, skb);
3803 if (blocks > hdev->block_cnt)
3804 return;
3805
3806 hci_conn_enter_active_mode(chan->conn,
3807 bt_cb(skb)->force_active);
3808
3809 hci_send_frame(hdev, skb);
3810 hdev->acl_last_tx = jiffies;
3811
3812 hdev->block_cnt -= blocks;
3813 quote -= blocks;
3814
3815 chan->sent += blocks;
3816 chan->conn->sent += blocks;
3817 }
3818 }
3819
3820 if (cnt != hdev->block_cnt)
3821 hci_prio_recalculate(hdev, type);
3822}
3823
3824static void hci_sched_acl(struct hci_dev *hdev)
3825{
3826 BT_DBG("%s", hdev->name);
3827
3828 /* No ACL link over BR/EDR controller */
3829 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3830 return;
3831
3832 /* No AMP link over AMP controller */
3833 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3834 return;
3835
3836 switch (hdev->flow_ctl_mode) {
3837 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3838 hci_sched_acl_pkt(hdev);
3839 break;
3840
3841 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3842 hci_sched_acl_blk(hdev);
3843 break;
3844 }
3845}
3846
3847/* Schedule SCO */
3848static void hci_sched_sco(struct hci_dev *hdev)
3849{
3850 struct hci_conn *conn;
3851 struct sk_buff *skb;
3852 int quote;
3853
3854 BT_DBG("%s", hdev->name);
3855
3856 if (!hci_conn_num(hdev, SCO_LINK))
3857 return;
3858
3859	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3860 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3861 BT_DBG("skb %p len %d", skb, skb->len);
3862 hci_send_frame(hdev, skb);
3863
3864 conn->sent++;
3865 if (conn->sent == ~0)
3866 conn->sent = 0;
3867 }
3868 }
3869}
3870
3871static void hci_sched_esco(struct hci_dev *hdev)
3872{
3873 struct hci_conn *conn;
3874 struct sk_buff *skb;
3875 int quote;
3876
3877 BT_DBG("%s", hdev->name);
3878
3879 if (!hci_conn_num(hdev, ESCO_LINK))
3880 return;
3881
3882	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3883						     &quote))) {
3884 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3885 BT_DBG("skb %p len %d", skb, skb->len);
3886 hci_send_frame(hdev, skb);
3887
3888 conn->sent++;
3889 if (conn->sent == ~0)
3890 conn->sent = 0;
3891 }
3892 }
3893}
3894
3895static void hci_sched_le(struct hci_dev *hdev)
3896{
3897 struct hci_chan *chan;
3898 struct sk_buff *skb;
3899 int quote, cnt, tmp;
3900
3901 BT_DBG("%s", hdev->name);
3902
3903 if (!hci_conn_num(hdev, LE_LINK))
3904 return;
3905
3906 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3907 /* LE tx timeout must be longer than maximum
3908 * link supervision timeout (40.9 seconds) */
3909 if (!hdev->le_cnt && hdev->le_pkts &&
3910 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3911 hci_link_tx_to(hdev, LE_LINK);
3912 }
3913
3914 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3915 tmp = cnt;
3916	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3917 u32 priority = (skb_peek(&chan->data_q))->priority;
3918 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3919 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3920 skb->len, skb->priority);
3921
3922 /* Stop if priority has changed */
3923 if (skb->priority < priority)
3924 break;
3925
3926 skb = skb_dequeue(&chan->data_q);
3927
3928 hci_send_frame(hdev, skb);
3929 hdev->le_last_tx = jiffies;
3930
3931 cnt--;
3932 chan->sent++;
3933 chan->conn->sent++;
3934 }
3935 }
3936
3937 if (hdev->le_pkts)
3938 hdev->le_cnt = cnt;
3939 else
3940 hdev->acl_cnt = cnt;
3941
3942 if (cnt != tmp)
3943 hci_prio_recalculate(hdev, LE_LINK);
3944}
3945
3946static void hci_tx_work(struct work_struct *work)
3947{
3948 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3949 struct sk_buff *skb;
3950
3951 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3952 hdev->sco_cnt, hdev->le_cnt);
3953
3954 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3955 /* Schedule queues and send stuff to HCI driver */
3956 hci_sched_acl(hdev);
3957 hci_sched_sco(hdev);
3958 hci_sched_esco(hdev);
3959 hci_sched_le(hdev);
3960 }
3961
3962 /* Send next queued raw (unknown type) packet */
3963 while ((skb = skb_dequeue(&hdev->raw_q)))
3964 hci_send_frame(hdev, skb);
3965}
3966
3967/* ----- HCI RX task (incoming data processing) ----- */
3968
3969/* ACL data packet */
3970static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3971{
3972 struct hci_acl_hdr *hdr = (void *) skb->data;
3973 struct hci_conn *conn;
3974 __u16 handle, flags;
3975
3976 skb_pull(skb, HCI_ACL_HDR_SIZE);
3977
3978 handle = __le16_to_cpu(hdr->handle);
3979 flags = hci_flags(handle);
3980 handle = hci_handle(handle);
3981
3982 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3983 handle, flags);
3984
3985 hdev->stat.acl_rx++;
3986
3987 hci_dev_lock(hdev);
3988 conn = hci_conn_hash_lookup_handle(hdev, handle);
3989 hci_dev_unlock(hdev);
3990
3991 if (conn) {
3992 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3993
3994 /* Send to upper protocol */
3995 l2cap_recv_acldata(conn, skb, flags);
3996 return;
3997 } else {
3998 BT_ERR("%s ACL packet for unknown connection handle %d",
3999 hdev->name, handle);
4000 }
4001
4002 kfree_skb(skb);
4003}
4004
4005/* SCO data packet */
4006static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4007{
4008 struct hci_sco_hdr *hdr = (void *) skb->data;
4009 struct hci_conn *conn;
4010 __u16 handle;
4011
4012 skb_pull(skb, HCI_SCO_HDR_SIZE);
4013
4014 handle = __le16_to_cpu(hdr->handle);
4015
4016 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4017
4018 hdev->stat.sco_rx++;
4019
4020 hci_dev_lock(hdev);
4021 conn = hci_conn_hash_lookup_handle(hdev, handle);
4022 hci_dev_unlock(hdev);
4023
4024 if (conn) {
4025 /* Send to upper protocol */
4026 sco_recv_scodata(conn, skb);
4027 return;
4028 } else {
4029 BT_ERR("%s SCO packet for unknown connection handle %d",
4030 hdev->name, handle);
4031 }
4032
4033 kfree_skb(skb);
4034}
4035
4036static bool hci_req_is_complete(struct hci_dev *hdev)
4037{
4038 struct sk_buff *skb;
4039
4040 skb = skb_peek(&hdev->cmd_q);
4041 if (!skb)
4042 return true;
4043
4044 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4045}
4046
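/* Requeue a clone of the last sent command. Used when a controller
 * emits a spontaneous reset complete and the pending command would
 * otherwise never be answered; HCI_Reset itself is never resent.
 */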
4047static void hci_resend_last(struct hci_dev *hdev)
4048{
4049 struct hci_command_hdr *sent;
4050 struct sk_buff *skb;
4051 u16 opcode;
4052
4053 if (!hdev->sent_cmd)
4054 return;
4055
4056 sent = (void *) hdev->sent_cmd->data;
4057 opcode = __le16_to_cpu(sent->opcode);
4058 if (opcode == HCI_OP_RESET)
4059 return;
4060
4061 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4062 if (!skb)
4063 return;
4064
4065 skb_queue_head(&hdev->cmd_q, skb);
4066 queue_work(hdev->workqueue, &hdev->cmd_work);
4067}
4068
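/* Look up the completion callback for the request that the just
 * completed command belongs to. If the request failed, or its last
 * command has finished, any remaining queued commands of that request
 * are discarded and the stored complete callback is handed back to
 * the caller.
 */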
4069void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4070 hci_req_complete_t *req_complete,
4071 hci_req_complete_skb_t *req_complete_skb)
4072{
4073 struct sk_buff *skb;
4074 unsigned long flags;
4075
4076 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4077
4078 /* If the completed command doesn't match the last one that was
4079 * sent we need to do special handling of it.
4080 */
4081 if (!hci_sent_cmd_data(hdev, opcode)) {
4082 /* Some CSR based controllers generate a spontaneous
4083 * reset complete event during init and any pending
4084 * command will never be completed. In such a case we
4085 * need to resend whatever was the last sent
4086 * command.
4087 */
4088 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4089 hci_resend_last(hdev);
4090
4091 return;
4092 }
4093
4094 /* If the command succeeded and there's still more commands in
4095 * this request the request is not yet complete.
4096 */
4097 if (!status && !hci_req_is_complete(hdev))
4098 return;
4099
4100 /* If this was the last command in a request the complete
4101 * callback would be found in hdev->sent_cmd instead of the
4102 * command queue (hdev->cmd_q).
4103 */
4104 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4105 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4106 return;
4107 }
4108
4109 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4110 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4111 return;
4112 }
4113
4114 /* Remove all pending commands belonging to this request */
4115 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4116 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4117 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4118 __skb_queue_head(&hdev->cmd_q, skb);
4119 break;
4120 }
4121
4122 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4123 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4124 else
4125 *req_complete = bt_cb(skb)->hci.req_complete;
4126 kfree_skb(skb);
4127 }
4128 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4129}
4130
4131static void hci_rx_work(struct work_struct *work)
4132{
4133 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4134 struct sk_buff *skb;
4135
4136 BT_DBG("%s", hdev->name);
4137
4138 while ((skb = skb_dequeue(&hdev->rx_q))) {
4139 /* Send copy to monitor */
4140 hci_send_to_monitor(hdev, skb);
4141
4142 if (atomic_read(&hdev->promisc)) {
4143 /* Send copy to the sockets */
4144 hci_send_to_sock(hdev, skb);
4145 }
4146
4147 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4148 kfree_skb(skb);
4149 continue;
4150 }
4151
4152 if (test_bit(HCI_INIT, &hdev->flags)) {
4153			/* Don't process data packets in this state. */
4154 switch (hci_skb_pkt_type(skb)) {
4155 case HCI_ACLDATA_PKT:
4156 case HCI_SCODATA_PKT:
4157 kfree_skb(skb);
4158 continue;
4159 }
4160 }
4161
4162 /* Process frame */
4163 switch (hci_skb_pkt_type(skb)) {
4164 case HCI_EVENT_PKT:
4165 BT_DBG("%s Event packet", hdev->name);
4166 hci_event_packet(hdev, skb);
4167 break;
4168
4169 case HCI_ACLDATA_PKT:
4170 BT_DBG("%s ACL data packet", hdev->name);
4171 hci_acldata_packet(hdev, skb);
4172 break;
4173
4174 case HCI_SCODATA_PKT:
4175 BT_DBG("%s SCO data packet", hdev->name);
4176 hci_scodata_packet(hdev, skb);
4177 break;
4178
4179 default:
4180 kfree_skb(skb);
4181 break;
4182 }
4183 }
4184}
4185
4186static void hci_cmd_work(struct work_struct *work)
4187{
4188 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4189 struct sk_buff *skb;
4190
4191 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4192 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4193
4194 /* Send queued commands */
4195 if (atomic_read(&hdev->cmd_cnt)) {
4196 skb = skb_dequeue(&hdev->cmd_q);
4197 if (!skb)
4198 return;
4199
4200 kfree_skb(hdev->sent_cmd);
4201
4202 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4203 if (hdev->sent_cmd) {
4204 atomic_dec(&hdev->cmd_cnt);
4205 hci_send_frame(hdev, skb);
4206 if (test_bit(HCI_RESET, &hdev->flags))
4207 cancel_delayed_work(&hdev->cmd_timer);
4208 else
4209 schedule_delayed_work(&hdev->cmd_timer,
4210 HCI_CMD_TIMEOUT);
4211 } else {
4212 skb_queue_head(&hdev->cmd_q, skb);
4213 queue_work(hdev->workqueue, &hdev->cmd_work);
4214 }
4215 }
4216}
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/idr.h>
30#include <linux/rfkill.h>
31#include <linux/debugfs.h>
32#include <linux/crypto.h>
33#include <asm/unaligned.h>
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
38#include "smp.h"
39
40static void hci_rx_work(struct work_struct *work);
41static void hci_cmd_work(struct work_struct *work);
42static void hci_tx_work(struct work_struct *work);
43
44/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
52/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
55/* ---- HCI notifications ---- */
56
57static void hci_notify(struct hci_dev *hdev, int event)
58{
59 hci_sock_dev_event(hdev, event);
60}
61
62/* ---- HCI debugfs entries ---- */
63
64static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
76static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120}
121
122static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127};
128
129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
199 u8 i, val[16];
200
201		/* The Bluetooth UUID values are stored with the byte order
202		 * reversed (effectively little endian), so convert them into
203		 * the big-endian order expected by the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
207
208 seq_printf(f, "%pUb\n", val);
209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
315static int voice_setting_get(void *data, u64 *val)
316{
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324}
325
326DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
475 hdev->idle_timeout = val;
476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
495static int rpa_timeout_set(void *data, u64 val)
496{
497 struct hci_dev *hdev = data;
498
499 /* Require the RPA timeout to be at least 30 seconds and at most
500 * 24 hours.
501 */
502 if (val < 30 || val > (60 * 60 * 24))
503 return -EINVAL;
504
505 hci_dev_lock(hdev);
506 hdev->rpa_timeout = val;
507 hci_dev_unlock(hdev);
508
509 return 0;
510}
511
512static int rpa_timeout_get(void *data, u64 *val)
513{
514 struct hci_dev *hdev = data;
515
516 hci_dev_lock(hdev);
517 *val = hdev->rpa_timeout;
518 hci_dev_unlock(hdev);
519
520 return 0;
521}
522
523DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
524 rpa_timeout_set, "%llu\n");
525
526static int sniff_min_interval_set(void *data, u64 val)
527{
528 struct hci_dev *hdev = data;
529
530 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
531 return -EINVAL;
532
533 hci_dev_lock(hdev);
534 hdev->sniff_min_interval = val;
535 hci_dev_unlock(hdev);
536
537 return 0;
538}
539
540static int sniff_min_interval_get(void *data, u64 *val)
541{
542 struct hci_dev *hdev = data;
543
544 hci_dev_lock(hdev);
545 *val = hdev->sniff_min_interval;
546 hci_dev_unlock(hdev);
547
548 return 0;
549}
550
551DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
552 sniff_min_interval_set, "%llu\n");
553
554static int sniff_max_interval_set(void *data, u64 val)
555{
556 struct hci_dev *hdev = data;
557
558 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
559 return -EINVAL;
560
561 hci_dev_lock(hdev);
562 hdev->sniff_max_interval = val;
563 hci_dev_unlock(hdev);
564
565 return 0;
566}
567
568static int sniff_max_interval_get(void *data, u64 *val)
569{
570 struct hci_dev *hdev = data;
571
572 hci_dev_lock(hdev);
573 *val = hdev->sniff_max_interval;
574 hci_dev_unlock(hdev);
575
576 return 0;
577}
578
579DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
580 sniff_max_interval_set, "%llu\n");
581
582static int identity_show(struct seq_file *f, void *p)
583{
584 struct hci_dev *hdev = f->private;
585 bdaddr_t addr;
586 u8 addr_type;
587
588 hci_dev_lock(hdev);
589
590 hci_copy_identity_address(hdev, &addr, &addr_type);
591
592 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
593 16, hdev->irk, &hdev->rpa);
594
595 hci_dev_unlock(hdev);
596
597 return 0;
598}
599
600static int identity_open(struct inode *inode, struct file *file)
601{
602 return single_open(file, identity_show, inode->i_private);
603}
604
605static const struct file_operations identity_fops = {
606 .open = identity_open,
607 .read = seq_read,
608 .llseek = seq_lseek,
609 .release = single_release,
610};
611
612static int random_address_show(struct seq_file *f, void *p)
613{
614 struct hci_dev *hdev = f->private;
615
616 hci_dev_lock(hdev);
617 seq_printf(f, "%pMR\n", &hdev->random_addr);
618 hci_dev_unlock(hdev);
619
620 return 0;
621}
622
623static int random_address_open(struct inode *inode, struct file *file)
624{
625 return single_open(file, random_address_show, inode->i_private);
626}
627
628static const struct file_operations random_address_fops = {
629 .open = random_address_open,
630 .read = seq_read,
631 .llseek = seq_lseek,
632 .release = single_release,
633};
634
635static int static_address_show(struct seq_file *f, void *p)
636{
637 struct hci_dev *hdev = f->private;
638
639 hci_dev_lock(hdev);
640 seq_printf(f, "%pMR\n", &hdev->static_addr);
641 hci_dev_unlock(hdev);
642
643 return 0;
644}
645
646static int static_address_open(struct inode *inode, struct file *file)
647{
648 return single_open(file, static_address_show, inode->i_private);
649}
650
651static const struct file_operations static_address_fops = {
652 .open = static_address_open,
653 .read = seq_read,
654 .llseek = seq_lseek,
655 .release = single_release,
656};
657
658static ssize_t force_static_address_read(struct file *file,
659 char __user *user_buf,
660 size_t count, loff_t *ppos)
661{
662 struct hci_dev *hdev = file->private_data;
663 char buf[3];
664
665 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
666 buf[1] = '\n';
667 buf[2] = '\0';
668 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
669}
670
671static ssize_t force_static_address_write(struct file *file,
672 const char __user *user_buf,
673 size_t count, loff_t *ppos)
674{
675 struct hci_dev *hdev = file->private_data;
676 char buf[32];
677 size_t buf_size = min(count, (sizeof(buf)-1));
678 bool enable;
679
680 if (test_bit(HCI_UP, &hdev->flags))
681 return -EBUSY;
682
683 if (copy_from_user(buf, user_buf, buf_size))
684 return -EFAULT;
685
686 buf[buf_size] = '\0';
687 if (strtobool(buf, &enable))
688 return -EINVAL;
689
690 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
691 return -EALREADY;
692
693 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
694
695 return count;
696}
697
698static const struct file_operations force_static_address_fops = {
699 .open = simple_open,
700 .read = force_static_address_read,
701 .write = force_static_address_write,
702 .llseek = default_llseek,
703};
704
705static int white_list_show(struct seq_file *f, void *ptr)
706{
707 struct hci_dev *hdev = f->private;
708 struct bdaddr_list *b;
709
710 hci_dev_lock(hdev);
711 list_for_each_entry(b, &hdev->le_white_list, list)
712 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
713 hci_dev_unlock(hdev);
714
715 return 0;
716}
717
718static int white_list_open(struct inode *inode, struct file *file)
719{
720 return single_open(file, white_list_show, inode->i_private);
721}
722
723static const struct file_operations white_list_fops = {
724 .open = white_list_open,
725 .read = seq_read,
726 .llseek = seq_lseek,
727 .release = single_release,
728};
729
730static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
731{
732 struct hci_dev *hdev = f->private;
733 struct list_head *p, *n;
734
735 hci_dev_lock(hdev);
736 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
737 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
738 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
739 &irk->bdaddr, irk->addr_type,
740 16, irk->val, &irk->rpa);
741 }
742 hci_dev_unlock(hdev);
743
744 return 0;
745}
746
747static int identity_resolving_keys_open(struct inode *inode, struct file *file)
748{
749 return single_open(file, identity_resolving_keys_show,
750 inode->i_private);
751}
752
753static const struct file_operations identity_resolving_keys_fops = {
754 .open = identity_resolving_keys_open,
755 .read = seq_read,
756 .llseek = seq_lseek,
757 .release = single_release,
758};
759
760static int long_term_keys_show(struct seq_file *f, void *ptr)
761{
762 struct hci_dev *hdev = f->private;
763 struct list_head *p, *n;
764
765 hci_dev_lock(hdev);
766 list_for_each_safe(p, n, &hdev->long_term_keys) {
767 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
768 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
769			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
770 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
771 __le64_to_cpu(ltk->rand), 16, ltk->val);
772 }
773 hci_dev_unlock(hdev);
774
775 return 0;
776}
777
778static int long_term_keys_open(struct inode *inode, struct file *file)
779{
780 return single_open(file, long_term_keys_show, inode->i_private);
781}
782
783static const struct file_operations long_term_keys_fops = {
784 .open = long_term_keys_open,
785 .read = seq_read,
786 .llseek = seq_lseek,
787 .release = single_release,
788};
789
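/* LE connection interval values are expressed in units of 1.25 ms, so the
 * accepted range 0x0006-0x0c80 corresponds to 7.5 ms - 4 s; the minimum is
 * additionally not allowed to exceed the configured maximum.
 */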
790static int conn_min_interval_set(void *data, u64 val)
791{
792 struct hci_dev *hdev = data;
793
794 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
795 return -EINVAL;
796
797 hci_dev_lock(hdev);
798 hdev->le_conn_min_interval = val;
799 hci_dev_unlock(hdev);
800
801 return 0;
802}
803
804static int conn_min_interval_get(void *data, u64 *val)
805{
806 struct hci_dev *hdev = data;
807
808 hci_dev_lock(hdev);
809 *val = hdev->le_conn_min_interval;
810 hci_dev_unlock(hdev);
811
812 return 0;
813}
814
815DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
816 conn_min_interval_set, "%llu\n");
817
818static int conn_max_interval_set(void *data, u64 val)
819{
820 struct hci_dev *hdev = data;
821
822 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
823 return -EINVAL;
824
825 hci_dev_lock(hdev);
826 hdev->le_conn_max_interval = val;
827 hci_dev_unlock(hdev);
828
829 return 0;
830}
831
832static int conn_max_interval_get(void *data, u64 *val)
833{
834 struct hci_dev *hdev = data;
835
836 hci_dev_lock(hdev);
837 *val = hdev->le_conn_max_interval;
838 hci_dev_unlock(hdev);
839
840 return 0;
841}
842
843DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
844 conn_max_interval_set, "%llu\n");
845
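/* The advertising channel map is a three-bit mask: bit 0 enables channel
 * 37, bit 1 channel 38 and bit 2 channel 39. At least one channel must be
 * enabled, hence the 0x01-0x07 range check.
 */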
846static int adv_channel_map_set(void *data, u64 val)
847{
848 struct hci_dev *hdev = data;
849
850 if (val < 0x01 || val > 0x07)
851 return -EINVAL;
852
853 hci_dev_lock(hdev);
854 hdev->le_adv_channel_map = val;
855 hci_dev_unlock(hdev);
856
857 return 0;
858}
859
860static int adv_channel_map_get(void *data, u64 *val)
861{
862 struct hci_dev *hdev = data;
863
864 hci_dev_lock(hdev);
865 *val = hdev->le_adv_channel_map;
866 hci_dev_unlock(hdev);
867
868 return 0;
869}
870
871DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
872 adv_channel_map_set, "%llu\n");
873
874static ssize_t lowpan_read(struct file *file, char __user *user_buf,
875 size_t count, loff_t *ppos)
876{
877 struct hci_dev *hdev = file->private_data;
878 char buf[3];
879
880 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
881 buf[1] = '\n';
882 buf[2] = '\0';
883 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
884}
885
886static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
887 size_t count, loff_t *position)
888{
889 struct hci_dev *hdev = fp->private_data;
890 bool enable;
891 char buf[32];
892 size_t buf_size = min(count, (sizeof(buf)-1));
893
894 if (copy_from_user(buf, user_buffer, buf_size))
895 return -EFAULT;
896
897 buf[buf_size] = '\0';
898
899 if (strtobool(buf, &enable) < 0)
900 return -EINVAL;
901
902 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
903 return -EALREADY;
904
905 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
906
907 return count;
908}
909
910static const struct file_operations lowpan_debugfs_fops = {
911 .open = simple_open,
912 .read = lowpan_read,
913 .write = lowpan_write,
914 .llseek = default_llseek,
915};
916
917static int le_auto_conn_show(struct seq_file *sf, void *ptr)
918{
919 struct hci_dev *hdev = sf->private;
920 struct hci_conn_params *p;
921
922 hci_dev_lock(hdev);
923
924 list_for_each_entry(p, &hdev->le_conn_params, list) {
925 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
926 p->auto_connect);
927 }
928
929 hci_dev_unlock(hdev);
930
931 return 0;
932}
933
934static int le_auto_conn_open(struct inode *inode, struct file *file)
935{
936 return single_open(file, le_auto_conn_show, inode->i_private);
937}
938
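/* Writes to the le_auto_conn debugfs entry take one of three forms
 * (a sketch of the accepted syntax, derived from the parsing below):
 *
 *	add <bdaddr> <addr_type> [<auto_connect>]
 *	del <bdaddr> <addr_type>
 *	clr
 *
 * e.g., assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo "add 11:22:33:44:55:66 1 2" > \
 *		/sys/kernel/debug/bluetooth/hci0/le_auto_conn
 */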
939static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
940 size_t count, loff_t *offset)
941{
942 struct seq_file *sf = file->private_data;
943 struct hci_dev *hdev = sf->private;
944 u8 auto_connect = 0;
945 bdaddr_t addr;
946 u8 addr_type;
947 char *buf;
948 int err = 0;
949 int n;
950
951 /* Don't allow partial write */
952 if (*offset != 0)
953 return -EINVAL;
954
955 if (count < 3)
956 return -EINVAL;
957
958 buf = kzalloc(count, GFP_KERNEL);
959 if (!buf)
960 return -ENOMEM;
961
962 if (copy_from_user(buf, data, count)) {
963 err = -EFAULT;
964 goto done;
965 }
966
967 if (memcmp(buf, "add", 3) == 0) {
968 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
969 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
970 &addr.b[1], &addr.b[0], &addr_type,
971 &auto_connect);
972
973 if (n < 7) {
974 err = -EINVAL;
975 goto done;
976 }
977
978 hci_dev_lock(hdev);
979 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
980 hdev->le_conn_min_interval,
981 hdev->le_conn_max_interval);
982 hci_dev_unlock(hdev);
983
984 if (err)
985 goto done;
986 } else if (memcmp(buf, "del", 3) == 0) {
987 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
988 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
989 &addr.b[1], &addr.b[0], &addr_type);
990
991 if (n < 7) {
992 err = -EINVAL;
993 goto done;
994 }
995
996 hci_dev_lock(hdev);
997 hci_conn_params_del(hdev, &addr, addr_type);
998 hci_dev_unlock(hdev);
999 } else if (memcmp(buf, "clr", 3) == 0) {
1000 hci_dev_lock(hdev);
1001 hci_conn_params_clear(hdev);
1002 hci_pend_le_conns_clear(hdev);
1003 hci_update_background_scan(hdev);
1004 hci_dev_unlock(hdev);
1005 } else {
1006 err = -EINVAL;
1007 }
1008
1009done:
1010 kfree(buf);
1011
1012 if (err)
1013 return err;
1014 else
1015 return count;
1016}
1017
1018static const struct file_operations le_auto_conn_fops = {
1019 .open = le_auto_conn_open,
1020 .read = seq_read,
1021 .write = le_auto_conn_write,
1022 .llseek = seq_lseek,
1023 .release = single_release,
1024};
1025
1026/* ---- HCI requests ---- */
1027
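/* Synchronous request machinery: the submitting thread marks
 * hdev->req_status as HCI_REQ_PEND and sleeps on hdev->req_wait_q; the
 * completion (or cancellation) path stores the result, flips the status to
 * HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the waiter up.
 */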
1028static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1029{
1030 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1031
1032 if (hdev->req_status == HCI_REQ_PEND) {
1033 hdev->req_result = result;
1034 hdev->req_status = HCI_REQ_DONE;
1035 wake_up_interruptible(&hdev->req_wait_q);
1036 }
1037}
1038
1039static void hci_req_cancel(struct hci_dev *hdev, int err)
1040{
1041 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1042
1043 if (hdev->req_status == HCI_REQ_PEND) {
1044 hdev->req_result = err;
1045 hdev->req_status = HCI_REQ_CANCELED;
1046 wake_up_interruptible(&hdev->req_wait_q);
1047 }
1048}
1049
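/* Take ownership of hdev->recv_evt and return it to the caller when it is
 * the event that was waited for: either the specific @event code, or a
 * Command Complete event whose opcode matches @opcode. Anything else is
 * freed and ERR_PTR(-ENODATA) is returned.
 */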
1050static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1051 u8 event)
1052{
1053 struct hci_ev_cmd_complete *ev;
1054 struct hci_event_hdr *hdr;
1055 struct sk_buff *skb;
1056
1057 hci_dev_lock(hdev);
1058
1059 skb = hdev->recv_evt;
1060 hdev->recv_evt = NULL;
1061
1062 hci_dev_unlock(hdev);
1063
1064 if (!skb)
1065 return ERR_PTR(-ENODATA);
1066
1067 if (skb->len < sizeof(*hdr)) {
1068 BT_ERR("Too short HCI event");
1069 goto failed;
1070 }
1071
1072 hdr = (void *) skb->data;
1073 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1074
1075 if (event) {
1076 if (hdr->evt != event)
1077 goto failed;
1078 return skb;
1079 }
1080
1081 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1082 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1083 goto failed;
1084 }
1085
1086 if (skb->len < sizeof(*ev)) {
1087 BT_ERR("Too short cmd_complete event");
1088 goto failed;
1089 }
1090
1091 ev = (void *) skb->data;
1092 skb_pull(skb, sizeof(*ev));
1093
1094 if (opcode == __le16_to_cpu(ev->opcode))
1095 return skb;
1096
1097 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1098 __le16_to_cpu(ev->opcode));
1099
1100failed:
1101 kfree_skb(skb);
1102 return ERR_PTR(-ENODATA);
1103}
1104
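/* Typical use of the synchronous command helpers (a sketch): send a single
 * HCI command and wait for its Command Complete event, e.g.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... inspect skb->data (the command return parameters) ...
 *	kfree_skb(skb);
 *
 * For the Command Complete case the event and command-complete headers are
 * already pulled, so skb->data points at the return parameters.
 */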
1105struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1106 const void *param, u8 event, u32 timeout)
1107{
1108 DECLARE_WAITQUEUE(wait, current);
1109 struct hci_request req;
1110 int err = 0;
1111
1112 BT_DBG("%s", hdev->name);
1113
1114 hci_req_init(&req, hdev);
1115
1116 hci_req_add_ev(&req, opcode, plen, param, event);
1117
1118 hdev->req_status = HCI_REQ_PEND;
1119
1120 err = hci_req_run(&req, hci_req_sync_complete);
1121 if (err < 0)
1122 return ERR_PTR(err);
1123
1124 add_wait_queue(&hdev->req_wait_q, &wait);
1125 set_current_state(TASK_INTERRUPTIBLE);
1126
1127 schedule_timeout(timeout);
1128
1129 remove_wait_queue(&hdev->req_wait_q, &wait);
1130
1131 if (signal_pending(current))
1132 return ERR_PTR(-EINTR);
1133
1134 switch (hdev->req_status) {
1135 case HCI_REQ_DONE:
1136 err = -bt_to_errno(hdev->req_result);
1137 break;
1138
1139 case HCI_REQ_CANCELED:
1140 err = -hdev->req_result;
1141 break;
1142
1143 default:
1144 err = -ETIMEDOUT;
1145 break;
1146 }
1147
1148 hdev->req_status = hdev->req_result = 0;
1149
1150 BT_DBG("%s end: err %d", hdev->name, err);
1151
1152 if (err < 0)
1153 return ERR_PTR(err);
1154
1155 return hci_get_cmd_complete(hdev, opcode, event);
1156}
1157EXPORT_SYMBOL(__hci_cmd_sync_ev);
1158
1159struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1160 const void *param, u32 timeout)
1161{
1162 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1163}
1164EXPORT_SYMBOL(__hci_cmd_sync);
1165
1166/* Execute request and wait for completion. */
1167static int __hci_req_sync(struct hci_dev *hdev,
1168 void (*func)(struct hci_request *req,
1169 unsigned long opt),
1170 unsigned long opt, __u32 timeout)
1171{
1172 struct hci_request req;
1173 DECLARE_WAITQUEUE(wait, current);
1174 int err = 0;
1175
1176 BT_DBG("%s start", hdev->name);
1177
1178 hci_req_init(&req, hdev);
1179
1180 hdev->req_status = HCI_REQ_PEND;
1181
1182 func(&req, opt);
1183
1184 err = hci_req_run(&req, hci_req_sync_complete);
1185 if (err < 0) {
1186 hdev->req_status = 0;
1187
1188 /* ENODATA means the HCI request command queue is empty.
1189 * This can happen when a request with conditionals doesn't
1190 * trigger any commands to be sent. This is normal behavior
1191 * and should not trigger an error return.
1192 */
1193 if (err == -ENODATA)
1194 return 0;
1195
1196 return err;
1197 }
1198
1199 add_wait_queue(&hdev->req_wait_q, &wait);
1200 set_current_state(TASK_INTERRUPTIBLE);
1201
1202 schedule_timeout(timeout);
1203
1204 remove_wait_queue(&hdev->req_wait_q, &wait);
1205
1206 if (signal_pending(current))
1207 return -EINTR;
1208
1209 switch (hdev->req_status) {
1210 case HCI_REQ_DONE:
1211 err = -bt_to_errno(hdev->req_result);
1212 break;
1213
1214 case HCI_REQ_CANCELED:
1215 err = -hdev->req_result;
1216 break;
1217
1218 default:
1219 err = -ETIMEDOUT;
1220 break;
1221 }
1222
1223 hdev->req_status = hdev->req_result = 0;
1224
1225 BT_DBG("%s end: err %d", hdev->name, err);
1226
1227 return err;
1228}
1229
1230static int hci_req_sync(struct hci_dev *hdev,
1231 void (*req)(struct hci_request *req,
1232 unsigned long opt),
1233 unsigned long opt, __u32 timeout)
1234{
1235 int ret;
1236
1237 if (!test_bit(HCI_UP, &hdev->flags))
1238 return -ENETDOWN;
1239
1240 /* Serialize all requests */
1241 hci_req_lock(hdev);
1242 ret = __hci_req_sync(hdev, req, opt, timeout);
1243 hci_req_unlock(hdev);
1244
1245 return ret;
1246}
1247
1248static void hci_reset_req(struct hci_request *req, unsigned long opt)
1249{
1250 BT_DBG("%s %ld", req->hdev->name, opt);
1251
1252 /* Reset device */
1253 set_bit(HCI_RESET, &req->hdev->flags);
1254 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1255}
1256
1257static void bredr_init(struct hci_request *req)
1258{
1259 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1260
1261 /* Read Local Supported Features */
1262 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1263
1264 /* Read Local Version */
1265 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1266
1267 /* Read BD Address */
1268 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1269}
1270
1271static void amp_init(struct hci_request *req)
1272{
1273 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1274
1275 /* Read Local Version */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1277
1278 /* Read Local Supported Commands */
1279 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1280
1281 /* Read Local Supported Features */
1282 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1283
1284 /* Read Local AMP Info */
1285 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1286
1287 /* Read Data Blk size */
1288 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1289
1290 /* Read Flow Control Mode */
1291 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1292
1293 /* Read Location Data */
1294 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1295}
1296
1297static void hci_init1_req(struct hci_request *req, unsigned long opt)
1298{
1299 struct hci_dev *hdev = req->hdev;
1300
1301 BT_DBG("%s %ld", hdev->name, opt);
1302
1303 /* Reset */
1304 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1305 hci_reset_req(req, 0);
1306
1307 switch (hdev->dev_type) {
1308 case HCI_BREDR:
1309 bredr_init(req);
1310 break;
1311
1312 case HCI_AMP:
1313 amp_init(req);
1314 break;
1315
1316 default:
1317 BT_ERR("Unknown device type %d", hdev->dev_type);
1318 break;
1319 }
1320}
1321
1322static void bredr_setup(struct hci_request *req)
1323{
1324 struct hci_dev *hdev = req->hdev;
1325
1326 __le16 param;
1327 __u8 flt_type;
1328
1329 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1330 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1331
1332 /* Read Class of Device */
1333 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1334
1335 /* Read Local Name */
1336 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1337
1338 /* Read Voice Setting */
1339 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1340
1341 /* Read Number of Supported IAC */
1342 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1343
1344 /* Read Current IAC LAP */
1345 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1346
1347 /* Clear Event Filters */
1348 flt_type = HCI_FLT_CLEAR_ALL;
1349 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1350
1351	/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms) */
1352 param = cpu_to_le16(0x7d00);
1353	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1354
1355 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1356 * but it does not support page scan related HCI commands.
1357 */
1358 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1359 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1360 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1361 }
1362}
1363
1364static void le_setup(struct hci_request *req)
1365{
1366 struct hci_dev *hdev = req->hdev;
1367
1368 /* Read LE Buffer Size */
1369 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1370
1371 /* Read LE Local Supported Features */
1372 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1373
1374 /* Read LE Supported States */
1375 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1376
1377 /* Read LE Advertising Channel TX Power */
1378 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1379
1380 /* Read LE White List Size */
1381 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1382
1383 /* Clear LE White List */
1384 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1385
1386 /* LE-only controllers have LE implicitly enabled */
1387 if (!lmp_bredr_capable(hdev))
1388 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1389}
1390
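/* Pick the Inquiry_Mode value to program into the controller:
 * 0x00 = standard inquiry result, 0x01 = inquiry result with RSSI,
 * 0x02 = inquiry result with RSSI or extended inquiry result. The
 * manufacturer/revision matches below presumably cover controllers that
 * handle RSSI results without advertising the corresponding LMP feature.
 */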
1391static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1392{
1393 if (lmp_ext_inq_capable(hdev))
1394 return 0x02;
1395
1396 if (lmp_inq_rssi_capable(hdev))
1397 return 0x01;
1398
1399 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1400 hdev->lmp_subver == 0x0757)
1401 return 0x01;
1402
1403 if (hdev->manufacturer == 15) {
1404 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1405 return 0x01;
1406 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1407 return 0x01;
1408 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1409 return 0x01;
1410 }
1411
1412 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1413 hdev->lmp_subver == 0x1805)
1414 return 0x01;
1415
1416 return 0x00;
1417}
1418
1419static void hci_setup_inquiry_mode(struct hci_request *req)
1420{
1421 u8 mode;
1422
1423 mode = hci_get_inquiry_mode(req->hdev);
1424
1425 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1426}
1427
1428static void hci_setup_event_mask(struct hci_request *req)
1429{
1430 struct hci_dev *hdev = req->hdev;
1431
1432 /* The second byte is 0xff instead of 0x9f (two reserved bits
1433 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1434 * command otherwise.
1435 */
1436 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1437
1438	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1439	 * any event mask for pre-1.2 devices.
1440 */
1441 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1442 return;
1443
1444 if (lmp_bredr_capable(hdev)) {
1445 events[4] |= 0x01; /* Flow Specification Complete */
1446 events[4] |= 0x02; /* Inquiry Result with RSSI */
1447 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1448 events[5] |= 0x08; /* Synchronous Connection Complete */
1449 events[5] |= 0x10; /* Synchronous Connection Changed */
1450 } else {
1451 /* Use a different default for LE-only devices */
1452 memset(events, 0, sizeof(events));
1453 events[0] |= 0x10; /* Disconnection Complete */
1454 events[0] |= 0x80; /* Encryption Change */
1455 events[1] |= 0x08; /* Read Remote Version Information Complete */
1456 events[1] |= 0x20; /* Command Complete */
1457 events[1] |= 0x40; /* Command Status */
1458 events[1] |= 0x80; /* Hardware Error */
1459 events[2] |= 0x04; /* Number of Completed Packets */
1460 events[3] |= 0x02; /* Data Buffer Overflow */
1461 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1462 }
1463
1464 if (lmp_inq_rssi_capable(hdev))
1465 events[4] |= 0x02; /* Inquiry Result with RSSI */
1466
1467 if (lmp_sniffsubr_capable(hdev))
1468 events[5] |= 0x20; /* Sniff Subrating */
1469
1470 if (lmp_pause_enc_capable(hdev))
1471 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1472
1473 if (lmp_ext_inq_capable(hdev))
1474 events[5] |= 0x40; /* Extended Inquiry Result */
1475
1476 if (lmp_no_flush_capable(hdev))
1477 events[7] |= 0x01; /* Enhanced Flush Complete */
1478
1479 if (lmp_lsto_capable(hdev))
1480 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1481
1482 if (lmp_ssp_capable(hdev)) {
1483 events[6] |= 0x01; /* IO Capability Request */
1484 events[6] |= 0x02; /* IO Capability Response */
1485 events[6] |= 0x04; /* User Confirmation Request */
1486 events[6] |= 0x08; /* User Passkey Request */
1487 events[6] |= 0x10; /* Remote OOB Data Request */
1488 events[6] |= 0x20; /* Simple Pairing Complete */
1489 events[7] |= 0x04; /* User Passkey Notification */
1490 events[7] |= 0x08; /* Keypress Notification */
1491 events[7] |= 0x10; /* Remote Host Supported
1492 * Features Notification
1493 */
1494 }
1495
1496 if (lmp_le_capable(hdev))
1497 events[7] |= 0x20; /* LE Meta-Event */
1498
1499 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1500
1501 if (lmp_le_capable(hdev)) {
1502 memset(events, 0, sizeof(events));
1503 events[0] = 0x1f;
1504 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1505 sizeof(events), events);
1506 }
1507}
1508
1509static void hci_init2_req(struct hci_request *req, unsigned long opt)
1510{
1511 struct hci_dev *hdev = req->hdev;
1512
1513 if (lmp_bredr_capable(hdev))
1514 bredr_setup(req);
1515 else
1516 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1517
1518 if (lmp_le_capable(hdev))
1519 le_setup(req);
1520
1521 hci_setup_event_mask(req);
1522
1523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1525 */
1526 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1527 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1528
1529 if (lmp_ssp_capable(hdev)) {
1530		/* When SSP is available, the host features page should be
1531		 * available as well. However, some controllers list max_page
1532		 * as 0 as long as SSP has not been enabled.
1533		 * To achieve proper debugging output, force max_page to at
1534		 * least 1.
1535 */
1536 hdev->max_page = 0x01;
1537
1538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1539 u8 mode = 0x01;
1540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541 sizeof(mode), &mode);
1542 } else {
1543 struct hci_cp_write_eir cp;
1544
1545 memset(hdev->eir, 0, sizeof(hdev->eir));
1546 memset(&cp, 0, sizeof(cp));
1547
1548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1549 }
1550 }
1551
1552 if (lmp_inq_rssi_capable(hdev))
1553 hci_setup_inquiry_mode(req);
1554
1555 if (lmp_inq_tx_pwr_capable(hdev))
1556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1557
1558 if (lmp_ext_feat_capable(hdev)) {
1559 struct hci_cp_read_local_ext_features cp;
1560
1561 cp.page = 0x01;
1562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1563 sizeof(cp), &cp);
1564 }
1565
1566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1567 u8 enable = 1;
1568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1569 &enable);
1570 }
1571}
1572
1573static void hci_setup_link_policy(struct hci_request *req)
1574{
1575 struct hci_dev *hdev = req->hdev;
1576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
1589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1590}
1591
1592static void hci_set_le_support(struct hci_request *req)
1593{
1594 struct hci_dev *hdev = req->hdev;
1595 struct hci_cp_write_le_host_supported cp;
1596
1597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1599 return;
1600
1601 memset(&cp, 0, sizeof(cp));
1602
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604 cp.le = 0x01;
1605 cp.simul = lmp_le_br_capable(hdev);
1606 }
1607
1608 if (cp.le != lmp_host_le_capable(hdev))
1609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610 &cp);
1611}
1612
1613static void hci_set_event_mask_page_2(struct hci_request *req)
1614{
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618 /* If Connectionless Slave Broadcast master role is supported
1619 * enable all necessary events for it.
1620 */
1621 if (lmp_csb_master_capable(hdev)) {
1622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1626 }
1627
1628 /* If Connectionless Slave Broadcast slave role is supported
1629 * enable all necessary events for it.
1630 */
1631 if (lmp_csb_slave_capable(hdev)) {
1632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1636 }
1637
1638 /* Enable Authenticated Payload Timeout Expired event if supported */
1639 if (lmp_ping_capable(hdev))
1640 events[2] |= 0x80;
1641
1642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643}
1644
1645static void hci_init3_req(struct hci_request *req, unsigned long opt)
1646{
1647 struct hci_dev *hdev = req->hdev;
1648 u8 p;
1649
1650 /* Some Broadcom based Bluetooth controllers do not support the
1651 * Delete Stored Link Key command. They are clearly indicating its
1652 * absence in the bit mask of supported commands.
1653 *
1654	 * Check the supported commands and send the command only if it is
1655	 * marked as supported. If it is not supported, assume that the
1656	 * controller does not have actual support for stored link keys,
1657	 * which makes this command redundant anyway.
1658	 *
1659	 * Some controllers indicate that they support deleting stored
1660	 * link keys, but they don't actually implement it. The quirk lets
1661	 * a driver just disable this command.
1662 */
1663 if (hdev->commands[6] & 0x80 &&
1664 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1665 struct hci_cp_delete_stored_link_key cp;
1666
1667 bacpy(&cp.bdaddr, BDADDR_ANY);
1668 cp.delete_all = 0x01;
1669 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1670 sizeof(cp), &cp);
1671 }
1672
1673 if (hdev->commands[5] & 0x10)
1674 hci_setup_link_policy(req);
1675
1676 if (lmp_le_capable(hdev))
1677 hci_set_le_support(req);
1678
1679 /* Read features beyond page 1 if available */
1680 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1681 struct hci_cp_read_local_ext_features cp;
1682
1683 cp.page = p;
1684 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1685 sizeof(cp), &cp);
1686 }
1687}
1688
1689static void hci_init4_req(struct hci_request *req, unsigned long opt)
1690{
1691 struct hci_dev *hdev = req->hdev;
1692
1693 /* Set event mask page 2 if the HCI command for it is supported */
1694 if (hdev->commands[22] & 0x04)
1695 hci_set_event_mask_page_2(req);
1696
1697 /* Check for Synchronization Train support */
1698 if (lmp_sync_train_capable(hdev))
1699 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1700
1701 /* Enable Secure Connections if supported and configured */
1702 if ((lmp_sc_capable(hdev) ||
1703 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1704 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1705 u8 support = 0x01;
1706 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1707 sizeof(support), &support);
1708 }
1709}
1710
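/* Controller bring-up: run the first init stage (reset and basic reads) for
 * every controller type, then for BR/EDR/LE controllers run stages 2-4
 * (transport setup and event mask, link policy / LE support / extended
 * features, page-2 event mask and Secure Connections). The debugfs entries
 * are only created while the controller is still in the HCI_SETUP phase.
 */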
1711static int __hci_init(struct hci_dev *hdev)
1712{
1713 int err;
1714
1715 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1716 if (err < 0)
1717 return err;
1718
1719 /* The Device Under Test (DUT) mode is special and available for
1720 * all controller types. So just create it early on.
1721 */
1722 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1723 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1724 &dut_mode_fops);
1725 }
1726
1727	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
1728	 * BR/EDR/LE type controllers. AMP controllers only need the first
1729	 * stage of init.
1730 */
1731 if (hdev->dev_type != HCI_BREDR)
1732 return 0;
1733
1734 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1735 if (err < 0)
1736 return err;
1737
1738 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1739 if (err < 0)
1740 return err;
1741
1742 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1743 if (err < 0)
1744 return err;
1745
1746 /* Only create debugfs entries during the initial setup
1747 * phase and not every time the controller gets powered on.
1748 */
1749 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1750 return 0;
1751
1752 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1753 &features_fops);
1754 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1755 &hdev->manufacturer);
1756 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1757 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1758 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1759 &blacklist_fops);
1760 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1761
1762 if (lmp_bredr_capable(hdev)) {
1763 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1764 hdev, &inquiry_cache_fops);
1765 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1766 hdev, &link_keys_fops);
1767 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1768 hdev, &dev_class_fops);
1769 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1770 hdev, &voice_setting_fops);
1771 }
1772
1773 if (lmp_ssp_capable(hdev)) {
1774 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1775 hdev, &auto_accept_delay_fops);
1776 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1777 hdev, &ssp_debug_mode_fops);
1778 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1779 hdev, &force_sc_support_fops);
1780 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1781 hdev, &sc_only_mode_fops);
1782 }
1783
1784 if (lmp_sniff_capable(hdev)) {
1785 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1786 hdev, &idle_timeout_fops);
1787 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1788 hdev, &sniff_min_interval_fops);
1789 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1790 hdev, &sniff_max_interval_fops);
1791 }
1792
1793 if (lmp_le_capable(hdev)) {
1794 debugfs_create_file("identity", 0400, hdev->debugfs,
1795 hdev, &identity_fops);
1796 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1797 hdev, &rpa_timeout_fops);
1798 debugfs_create_file("random_address", 0444, hdev->debugfs,
1799 hdev, &random_address_fops);
1800 debugfs_create_file("static_address", 0444, hdev->debugfs,
1801 hdev, &static_address_fops);
1802
1803 /* For controllers with a public address, provide a debug
1804 * option to force the usage of the configured static
1805 * address. By default the public address is used.
1806 */
1807 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1808 debugfs_create_file("force_static_address", 0644,
1809 hdev->debugfs, hdev,
1810 &force_static_address_fops);
1811
1812 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1813 &hdev->le_white_list_size);
1814 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1815 &white_list_fops);
1816 debugfs_create_file("identity_resolving_keys", 0400,
1817 hdev->debugfs, hdev,
1818 &identity_resolving_keys_fops);
1819 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1820 hdev, &long_term_keys_fops);
1821 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1822 hdev, &conn_min_interval_fops);
1823 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1824 hdev, &conn_max_interval_fops);
1825 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1826 hdev, &adv_channel_map_fops);
1827 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1828 &lowpan_debugfs_fops);
1829 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1830 &le_auto_conn_fops);
1831 }
1832
1833 return 0;
1834}
1835
1836static void hci_scan_req(struct hci_request *req, unsigned long opt)
1837{
1838 __u8 scan = opt;
1839
1840 BT_DBG("%s %x", req->hdev->name, scan);
1841
1842 /* Inquiry and Page scans */
1843 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1844}
1845
1846static void hci_auth_req(struct hci_request *req, unsigned long opt)
1847{
1848 __u8 auth = opt;
1849
1850 BT_DBG("%s %x", req->hdev->name, auth);
1851
1852 /* Authentication */
1853 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1854}
1855
1856static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1857{
1858 __u8 encrypt = opt;
1859
1860 BT_DBG("%s %x", req->hdev->name, encrypt);
1861
1862 /* Encryption */
1863 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1864}
1865
1866static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1867{
1868 __le16 policy = cpu_to_le16(opt);
1869
1870 BT_DBG("%s %x", req->hdev->name, policy);
1871
1872 /* Default link policy */
1873 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1874}
1875
1876/* Get HCI device by index.
1877 * Device is held on return. */
1878struct hci_dev *hci_dev_get(int index)
1879{
1880 struct hci_dev *hdev = NULL, *d;
1881
1882 BT_DBG("%d", index);
1883
1884 if (index < 0)
1885 return NULL;
1886
1887 read_lock(&hci_dev_list_lock);
1888 list_for_each_entry(d, &hci_dev_list, list) {
1889 if (d->id == index) {
1890 hdev = hci_dev_hold(d);
1891 break;
1892 }
1893 }
1894 read_unlock(&hci_dev_list_lock);
1895 return hdev;
1896}
1897
1898/* ---- Inquiry support ---- */
1899
1900bool hci_discovery_active(struct hci_dev *hdev)
1901{
1902 struct discovery_state *discov = &hdev->discovery;
1903
1904 switch (discov->state) {
1905 case DISCOVERY_FINDING:
1906 case DISCOVERY_RESOLVING:
1907 return true;
1908
1909 default:
1910 return false;
1911 }
1912}
1913
1914void hci_discovery_set_state(struct hci_dev *hdev, int state)
1915{
1916 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1917
1918 if (hdev->discovery.state == state)
1919 return;
1920
1921 switch (state) {
1922 case DISCOVERY_STOPPED:
1923 hci_update_background_scan(hdev);
1924
1925 if (hdev->discovery.state != DISCOVERY_STARTING)
1926 mgmt_discovering(hdev, 0);
1927 break;
1928 case DISCOVERY_STARTING:
1929 break;
1930 case DISCOVERY_FINDING:
1931 mgmt_discovering(hdev, 1);
1932 break;
1933 case DISCOVERY_RESOLVING:
1934 break;
1935 case DISCOVERY_STOPPING:
1936 break;
1937 }
1938
1939 hdev->discovery.state = state;
1940}
1941
1942void hci_inquiry_cache_flush(struct hci_dev *hdev)
1943{
1944 struct discovery_state *cache = &hdev->discovery;
1945 struct inquiry_entry *p, *n;
1946
1947 list_for_each_entry_safe(p, n, &cache->all, all) {
1948 list_del(&p->all);
1949 kfree(p);
1950 }
1951
1952 INIT_LIST_HEAD(&cache->unknown);
1953 INIT_LIST_HEAD(&cache->resolve);
1954}
1955
1956struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1957 bdaddr_t *bdaddr)
1958{
1959 struct discovery_state *cache = &hdev->discovery;
1960 struct inquiry_entry *e;
1961
1962 BT_DBG("cache %p, %pMR", cache, bdaddr);
1963
1964 list_for_each_entry(e, &cache->all, all) {
1965 if (!bacmp(&e->data.bdaddr, bdaddr))
1966 return e;
1967 }
1968
1969 return NULL;
1970}
1971
1972struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1973 bdaddr_t *bdaddr)
1974{
1975 struct discovery_state *cache = &hdev->discovery;
1976 struct inquiry_entry *e;
1977
1978 BT_DBG("cache %p, %pMR", cache, bdaddr);
1979
1980 list_for_each_entry(e, &cache->unknown, list) {
1981 if (!bacmp(&e->data.bdaddr, bdaddr))
1982 return e;
1983 }
1984
1985 return NULL;
1986}
1987
1988struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1989 bdaddr_t *bdaddr,
1990 int state)
1991{
1992 struct discovery_state *cache = &hdev->discovery;
1993 struct inquiry_entry *e;
1994
1995 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1996
1997 list_for_each_entry(e, &cache->resolve, list) {
1998 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1999 return e;
2000 if (!bacmp(&e->data.bdaddr, bdaddr))
2001 return e;
2002 }
2003
2004 return NULL;
2005}
2006
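/* Re-insert @ie so that the resolve list stays sorted by signal strength
 * (ascending |RSSI|, i.e. strongest first); entries whose name resolution
 * is already pending are skipped over.
 */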
2007void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2008 struct inquiry_entry *ie)
2009{
2010 struct discovery_state *cache = &hdev->discovery;
2011 struct list_head *pos = &cache->resolve;
2012 struct inquiry_entry *p;
2013
2014 list_del(&ie->list);
2015
2016 list_for_each_entry(p, &cache->resolve, list) {
2017 if (p->name_state != NAME_PENDING &&
2018 abs(p->data.rssi) >= abs(ie->data.rssi))
2019 break;
2020 pos = &p->list;
2021 }
2022
2023 list_add(&ie->list, pos);
2024}
2025
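/* Add or refresh the inquiry cache entry for a discovered device. Returns
 * false when the entry still has no known name (so the caller may want to
 * resolve it) and true otherwise; *ssp is set when either the new data or
 * the cached entry indicates SSP support.
 */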
2026bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2027 bool name_known, bool *ssp)
2028{
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct inquiry_entry *ie;
2031
2032 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2033
2034 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2035
2036 if (ssp)
2037 *ssp = data->ssp_mode;
2038
2039 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2040 if (ie) {
2041 if (ie->data.ssp_mode && ssp)
2042 *ssp = true;
2043
2044 if (ie->name_state == NAME_NEEDED &&
2045 data->rssi != ie->data.rssi) {
2046 ie->data.rssi = data->rssi;
2047 hci_inquiry_cache_update_resolve(hdev, ie);
2048 }
2049
2050 goto update;
2051 }
2052
2053 /* Entry not in the cache. Add new one. */
2054 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2055 if (!ie)
2056 return false;
2057
2058 list_add(&ie->all, &cache->all);
2059
2060 if (name_known) {
2061 ie->name_state = NAME_KNOWN;
2062 } else {
2063 ie->name_state = NAME_NOT_KNOWN;
2064 list_add(&ie->list, &cache->unknown);
2065 }
2066
2067update:
2068 if (name_known && ie->name_state != NAME_KNOWN &&
2069 ie->name_state != NAME_PENDING) {
2070 ie->name_state = NAME_KNOWN;
2071 list_del(&ie->list);
2072 }
2073
2074 memcpy(&ie->data, data, sizeof(*data));
2075 ie->timestamp = jiffies;
2076 cache->timestamp = jiffies;
2077
2078 if (ie->name_state == NAME_NOT_KNOWN)
2079 return false;
2080
2081 return true;
2082}
2083
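/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info and return the number of entries copied.
 */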
2084static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2085{
2086 struct discovery_state *cache = &hdev->discovery;
2087 struct inquiry_info *info = (struct inquiry_info *) buf;
2088 struct inquiry_entry *e;
2089 int copied = 0;
2090
2091 list_for_each_entry(e, &cache->all, all) {
2092 struct inquiry_data *data = &e->data;
2093
2094 if (copied >= num)
2095 break;
2096
2097 bacpy(&info->bdaddr, &data->bdaddr);
2098 info->pscan_rep_mode = data->pscan_rep_mode;
2099 info->pscan_period_mode = data->pscan_period_mode;
2100 info->pscan_mode = data->pscan_mode;
2101 memcpy(info->dev_class, data->dev_class, 3);
2102 info->clock_offset = data->clock_offset;
2103
2104 info++;
2105 copied++;
2106 }
2107
2108 BT_DBG("cache %p, copied %d", cache, copied);
2109 return copied;
2110}
2111
2112static void hci_inq_req(struct hci_request *req, unsigned long opt)
2113{
2114 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2115 struct hci_dev *hdev = req->hdev;
2116 struct hci_cp_inquiry cp;
2117
2118 BT_DBG("%s", hdev->name);
2119
2120 if (test_bit(HCI_INQUIRY, &hdev->flags))
2121 return;
2122
2123 /* Start Inquiry */
2124 memcpy(&cp.lap, &ir->lap, 3);
2125 cp.length = ir->length;
2126 cp.num_rsp = ir->num_rsp;
2127 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2128}
2129
2130static int wait_inquiry(void *word)
2131{
2132 schedule();
2133 return signal_pending(current);
2134}
2135
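/* Inquiry ioctl helper: flush the cache and start a new inquiry if the
 * cached results are stale or a flush was requested, wait for the
 * inquiry to finish and then copy the cached results to user space.
 */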
2136int hci_inquiry(void __user *arg)
2137{
2138 __u8 __user *ptr = arg;
2139 struct hci_inquiry_req ir;
2140 struct hci_dev *hdev;
2141 int err = 0, do_inquiry = 0, max_rsp;
2142 long timeo;
2143 __u8 *buf;
2144
2145 if (copy_from_user(&ir, ptr, sizeof(ir)))
2146 return -EFAULT;
2147
2148 hdev = hci_dev_get(ir.dev_id);
2149 if (!hdev)
2150 return -ENODEV;
2151
2152 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2153 err = -EBUSY;
2154 goto done;
2155 }
2156
2157 if (hdev->dev_type != HCI_BREDR) {
2158 err = -EOPNOTSUPP;
2159 goto done;
2160 }
2161
2162 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2163 err = -EOPNOTSUPP;
2164 goto done;
2165 }
2166
2167 hci_dev_lock(hdev);
2168 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2169 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2170 hci_inquiry_cache_flush(hdev);
2171 do_inquiry = 1;
2172 }
2173 hci_dev_unlock(hdev);
2174
2175 timeo = ir.length * msecs_to_jiffies(2000);
2176
2177 if (do_inquiry) {
2178 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2179 timeo);
2180 if (err < 0)
2181 goto done;
2182
2183 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2184 * cleared). If it is interrupted by a signal, return -EINTR.
2185 */
2186 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2187 TASK_INTERRUPTIBLE))
2188 return -EINTR;
2189 }
2190
2191	/* For an unlimited number of responses, use a buffer with
2192	 * 255 entries.
2193 */
2194 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2195
2196	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
2197	 * and then copy it to user space.
2198 */
2199 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2200 if (!buf) {
2201 err = -ENOMEM;
2202 goto done;
2203 }
2204
2205 hci_dev_lock(hdev);
2206 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2207 hci_dev_unlock(hdev);
2208
2209 BT_DBG("num_rsp %d", ir.num_rsp);
2210
2211 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2212 ptr += sizeof(ir);
2213 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2214 ir.num_rsp))
2215 err = -EFAULT;
2216 } else
2217 err = -EFAULT;
2218
2219 kfree(buf);
2220
2221done:
2222 hci_dev_put(hdev);
2223 return err;
2224}
2225
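/* Bring the device up: call the driver's open callback, run the HCI
 * init sequence and mark the device HCI_UP on success. On failure the
 * queues and works are cleaned up and the driver is closed again.
 */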
2226static int hci_dev_do_open(struct hci_dev *hdev)
2227{
2228 int ret = 0;
2229
2230 BT_DBG("%s %p", hdev->name, hdev);
2231
2232 hci_req_lock(hdev);
2233
2234 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2235 ret = -ENODEV;
2236 goto done;
2237 }
2238
2239 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2240 /* Check for rfkill but allow the HCI setup stage to
2241 * proceed (which in itself doesn't cause any RF activity).
2242 */
2243 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2244 ret = -ERFKILL;
2245 goto done;
2246 }
2247
2248 /* Check for valid public address or a configured static
2249		 * random address, but let the HCI setup proceed to
2250 * be able to determine if there is a public address
2251 * or not.
2252 *
2253 * In case of user channel usage, it is not important
2254 * if a public address or static random address is
2255 * available.
2256 *
2257 * This check is only valid for BR/EDR controllers
2258 * since AMP controllers do not have an address.
2259 */
2260 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2261 hdev->dev_type == HCI_BREDR &&
2262 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2263 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2264 ret = -EADDRNOTAVAIL;
2265 goto done;
2266 }
2267 }
2268
2269 if (test_bit(HCI_UP, &hdev->flags)) {
2270 ret = -EALREADY;
2271 goto done;
2272 }
2273
2274 if (hdev->open(hdev)) {
2275 ret = -EIO;
2276 goto done;
2277 }
2278
2279 atomic_set(&hdev->cmd_cnt, 1);
2280 set_bit(HCI_INIT, &hdev->flags);
2281
2282 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2283 ret = hdev->setup(hdev);
2284
2285 if (!ret) {
2286 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2287 set_bit(HCI_RAW, &hdev->flags);
2288
2289 if (!test_bit(HCI_RAW, &hdev->flags) &&
2290 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2291 ret = __hci_init(hdev);
2292 }
2293
2294 clear_bit(HCI_INIT, &hdev->flags);
2295
2296 if (!ret) {
2297 hci_dev_hold(hdev);
2298 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2299 set_bit(HCI_UP, &hdev->flags);
2300 hci_notify(hdev, HCI_DEV_UP);
2301 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2302 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2303 hdev->dev_type == HCI_BREDR) {
2304 hci_dev_lock(hdev);
2305 mgmt_powered(hdev, 1);
2306 hci_dev_unlock(hdev);
2307 }
2308 } else {
2309 /* Init failed, cleanup */
2310 flush_work(&hdev->tx_work);
2311 flush_work(&hdev->cmd_work);
2312 flush_work(&hdev->rx_work);
2313
2314 skb_queue_purge(&hdev->cmd_q);
2315 skb_queue_purge(&hdev->rx_q);
2316
2317 if (hdev->flush)
2318 hdev->flush(hdev);
2319
2320 if (hdev->sent_cmd) {
2321 kfree_skb(hdev->sent_cmd);
2322 hdev->sent_cmd = NULL;
2323 }
2324
2325 hdev->close(hdev);
2326 hdev->flags = 0;
2327 }
2328
2329done:
2330 hci_req_unlock(hdev);
2331 return ret;
2332}
2333
2334/* ---- HCI ioctl helpers ---- */
2335
2336int hci_dev_open(__u16 dev)
2337{
2338 struct hci_dev *hdev;
2339 int err;
2340
2341 hdev = hci_dev_get(dev);
2342 if (!hdev)
2343 return -ENODEV;
2344
2345 /* We need to ensure that no other power on/off work is pending
2346 * before proceeding to call hci_dev_do_open. This is
2347 * particularly important if the setup procedure has not yet
2348 * completed.
2349 */
2350 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2351 cancel_delayed_work(&hdev->power_off);
2352
2353 /* After this call it is guaranteed that the setup procedure
2354 * has finished. This means that error conditions like RFKILL
2355 * or no valid public or static random address apply.
2356 */
2357 flush_workqueue(hdev->req_workqueue);
2358
2359 err = hci_dev_do_open(hdev);
2360
2361 hci_dev_put(hdev);
2362
2363 return err;
2364}
2365
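/* Bring the device down: flush pending work and queues, clear the
 * discovery and connection state, optionally reset the controller and
 * finally call the driver's close callback.
 */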
2366static int hci_dev_do_close(struct hci_dev *hdev)
2367{
2368 BT_DBG("%s %p", hdev->name, hdev);
2369
2370 cancel_delayed_work(&hdev->power_off);
2371
2372 hci_req_cancel(hdev, ENODEV);
2373 hci_req_lock(hdev);
2374
2375 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2376 del_timer_sync(&hdev->cmd_timer);
2377 hci_req_unlock(hdev);
2378 return 0;
2379 }
2380
2381 /* Flush RX and TX works */
2382 flush_work(&hdev->tx_work);
2383 flush_work(&hdev->rx_work);
2384
2385 if (hdev->discov_timeout > 0) {
2386 cancel_delayed_work(&hdev->discov_off);
2387 hdev->discov_timeout = 0;
2388 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2389 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2390 }
2391
2392 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2393 cancel_delayed_work(&hdev->service_cache);
2394
2395 cancel_delayed_work_sync(&hdev->le_scan_disable);
2396
2397 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2398 cancel_delayed_work_sync(&hdev->rpa_expired);
2399
2400 hci_dev_lock(hdev);
2401 hci_inquiry_cache_flush(hdev);
2402 hci_conn_hash_flush(hdev);
2403 hci_pend_le_conns_clear(hdev);
2404 hci_dev_unlock(hdev);
2405
2406 hci_notify(hdev, HCI_DEV_DOWN);
2407
2408 if (hdev->flush)
2409 hdev->flush(hdev);
2410
2411 /* Reset device */
2412 skb_queue_purge(&hdev->cmd_q);
2413 atomic_set(&hdev->cmd_cnt, 1);
2414 if (!test_bit(HCI_RAW, &hdev->flags) &&
2415 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2416 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2417 set_bit(HCI_INIT, &hdev->flags);
2418 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2419 clear_bit(HCI_INIT, &hdev->flags);
2420 }
2421
2422 /* flush cmd work */
2423 flush_work(&hdev->cmd_work);
2424
2425 /* Drop queues */
2426 skb_queue_purge(&hdev->rx_q);
2427 skb_queue_purge(&hdev->cmd_q);
2428 skb_queue_purge(&hdev->raw_q);
2429
2430 /* Drop last sent command */
2431 if (hdev->sent_cmd) {
2432 del_timer_sync(&hdev->cmd_timer);
2433 kfree_skb(hdev->sent_cmd);
2434 hdev->sent_cmd = NULL;
2435 }
2436
2437 kfree_skb(hdev->recv_evt);
2438 hdev->recv_evt = NULL;
2439
2440 /* After this point our queues are empty
2441 * and no tasks are scheduled. */
2442 hdev->close(hdev);
2443
2444 /* Clear flags */
2445 hdev->flags = 0;
2446 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2447
2448 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2449 if (hdev->dev_type == HCI_BREDR) {
2450 hci_dev_lock(hdev);
2451 mgmt_powered(hdev, 0);
2452 hci_dev_unlock(hdev);
2453 }
2454 }
2455
2456 /* Controller radio is available but is currently powered down */
2457 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2458
2459 memset(hdev->eir, 0, sizeof(hdev->eir));
2460 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2461 bacpy(&hdev->random_addr, BDADDR_ANY);
2462
2463 hci_req_unlock(hdev);
2464
2465 hci_dev_put(hdev);
2466 return 0;
2467}
2468
2469int hci_dev_close(__u16 dev)
2470{
2471 struct hci_dev *hdev;
2472 int err;
2473
2474 hdev = hci_dev_get(dev);
2475 if (!hdev)
2476 return -ENODEV;
2477
2478 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2479 err = -EBUSY;
2480 goto done;
2481 }
2482
2483 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2484 cancel_delayed_work(&hdev->power_off);
2485
2486 err = hci_dev_do_close(hdev);
2487
2488done:
2489 hci_dev_put(hdev);
2490 return err;
2491}
2492
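/* Reset ioctl helper: drop all queued frames, flush the inquiry cache
 * and connection hash and, unless the device is in raw mode, issue an
 * HCI Reset to the controller.
 */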
2493int hci_dev_reset(__u16 dev)
2494{
2495 struct hci_dev *hdev;
2496 int ret = 0;
2497
2498 hdev = hci_dev_get(dev);
2499 if (!hdev)
2500 return -ENODEV;
2501
2502 hci_req_lock(hdev);
2503
2504 if (!test_bit(HCI_UP, &hdev->flags)) {
2505 ret = -ENETDOWN;
2506 goto done;
2507 }
2508
2509 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2510 ret = -EBUSY;
2511 goto done;
2512 }
2513
2514 /* Drop queues */
2515 skb_queue_purge(&hdev->rx_q);
2516 skb_queue_purge(&hdev->cmd_q);
2517
2518 hci_dev_lock(hdev);
2519 hci_inquiry_cache_flush(hdev);
2520 hci_conn_hash_flush(hdev);
2521 hci_dev_unlock(hdev);
2522
2523 if (hdev->flush)
2524 hdev->flush(hdev);
2525
2526 atomic_set(&hdev->cmd_cnt, 1);
2527 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2528
2529 if (!test_bit(HCI_RAW, &hdev->flags))
2530 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2531
2532done:
2533 hci_req_unlock(hdev);
2534 hci_dev_put(hdev);
2535 return ret;
2536}
2537
2538int hci_dev_reset_stat(__u16 dev)
2539{
2540 struct hci_dev *hdev;
2541 int ret = 0;
2542
2543 hdev = hci_dev_get(dev);
2544 if (!hdev)
2545 return -ENODEV;
2546
2547 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2548 ret = -EBUSY;
2549 goto done;
2550 }
2551
2552 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2553
2554done:
2555 hci_dev_put(hdev);
2556 return ret;
2557}
2558
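/* Handle the HCISET* ioctls that adjust controller settings such as
 * authentication, encryption, scan mode, link policy, link mode,
 * packet types and MTUs.
 */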
2559int hci_dev_cmd(unsigned int cmd, void __user *arg)
2560{
2561 struct hci_dev *hdev;
2562 struct hci_dev_req dr;
2563 int err = 0;
2564
2565 if (copy_from_user(&dr, arg, sizeof(dr)))
2566 return -EFAULT;
2567
2568 hdev = hci_dev_get(dr.dev_id);
2569 if (!hdev)
2570 return -ENODEV;
2571
2572 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 err = -EBUSY;
2574 goto done;
2575 }
2576
2577 if (hdev->dev_type != HCI_BREDR) {
2578 err = -EOPNOTSUPP;
2579 goto done;
2580 }
2581
2582 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2583 err = -EOPNOTSUPP;
2584 goto done;
2585 }
2586
2587 switch (cmd) {
2588 case HCISETAUTH:
2589 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2590 HCI_INIT_TIMEOUT);
2591 break;
2592
2593 case HCISETENCRYPT:
2594 if (!lmp_encrypt_capable(hdev)) {
2595 err = -EOPNOTSUPP;
2596 break;
2597 }
2598
2599 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2600 /* Auth must be enabled first */
2601 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2602 HCI_INIT_TIMEOUT);
2603 if (err)
2604 break;
2605 }
2606
2607 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2608 HCI_INIT_TIMEOUT);
2609 break;
2610
2611 case HCISETSCAN:
2612 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2613 HCI_INIT_TIMEOUT);
2614 break;
2615
2616 case HCISETLINKPOL:
2617 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2618 HCI_INIT_TIMEOUT);
2619 break;
2620
2621 case HCISETLINKMODE:
2622 hdev->link_mode = ((__u16) dr.dev_opt) &
2623 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2624 break;
2625
2626 case HCISETPTYPE:
2627 hdev->pkt_type = (__u16) dr.dev_opt;
2628 break;
2629
2630 case HCISETACLMTU:
2631 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2632 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2633 break;
2634
2635 case HCISETSCOMTU:
2636 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2637 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2638 break;
2639
2640 default:
2641 err = -EINVAL;
2642 break;
2643 }
2644
2645done:
2646 hci_dev_put(hdev);
2647 return err;
2648}
2649
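/* Device list ioctl helper: copy the id and flags of up to the
 * requested number of registered controllers to user space.
 */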
2650int hci_get_dev_list(void __user *arg)
2651{
2652 struct hci_dev *hdev;
2653 struct hci_dev_list_req *dl;
2654 struct hci_dev_req *dr;
2655 int n = 0, size, err;
2656 __u16 dev_num;
2657
2658 if (get_user(dev_num, (__u16 __user *) arg))
2659 return -EFAULT;
2660
2661 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2662 return -EINVAL;
2663
2664 size = sizeof(*dl) + dev_num * sizeof(*dr);
2665
2666 dl = kzalloc(size, GFP_KERNEL);
2667 if (!dl)
2668 return -ENOMEM;
2669
2670 dr = dl->dev_req;
2671
2672 read_lock(&hci_dev_list_lock);
2673 list_for_each_entry(hdev, &hci_dev_list, list) {
2674 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2675 cancel_delayed_work(&hdev->power_off);
2676
2677 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2678 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2679
2680 (dr + n)->dev_id = hdev->id;
2681 (dr + n)->dev_opt = hdev->flags;
2682
2683 if (++n >= dev_num)
2684 break;
2685 }
2686 read_unlock(&hci_dev_list_lock);
2687
2688 dl->dev_num = n;
2689 size = sizeof(*dl) + n * sizeof(*dr);
2690
2691 err = copy_to_user(arg, dl, size);
2692 kfree(dl);
2693
2694 return err ? -EFAULT : 0;
2695}
2696
2697int hci_get_dev_info(void __user *arg)
2698{
2699 struct hci_dev *hdev;
2700 struct hci_dev_info di;
2701 int err = 0;
2702
2703 if (copy_from_user(&di, arg, sizeof(di)))
2704 return -EFAULT;
2705
2706 hdev = hci_dev_get(di.dev_id);
2707 if (!hdev)
2708 return -ENODEV;
2709
2710 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2711 cancel_delayed_work_sync(&hdev->power_off);
2712
2713 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2714 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2715
2716 strcpy(di.name, hdev->name);
2717 di.bdaddr = hdev->bdaddr;
2718 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2719 di.flags = hdev->flags;
2720 di.pkt_type = hdev->pkt_type;
2721 if (lmp_bredr_capable(hdev)) {
2722 di.acl_mtu = hdev->acl_mtu;
2723 di.acl_pkts = hdev->acl_pkts;
2724 di.sco_mtu = hdev->sco_mtu;
2725 di.sco_pkts = hdev->sco_pkts;
2726 } else {
2727 di.acl_mtu = hdev->le_mtu;
2728 di.acl_pkts = hdev->le_pkts;
2729 di.sco_mtu = 0;
2730 di.sco_pkts = 0;
2731 }
2732 di.link_policy = hdev->link_policy;
2733 di.link_mode = hdev->link_mode;
2734
2735 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2736 memcpy(&di.features, &hdev->features, sizeof(di.features));
2737
2738 if (copy_to_user(arg, &di, sizeof(di)))
2739 err = -EFAULT;
2740
2741 hci_dev_put(hdev);
2742
2743 return err;
2744}
2745
2746/* ---- Interface to HCI drivers ---- */
2747
2748static int hci_rfkill_set_block(void *data, bool blocked)
2749{
2750 struct hci_dev *hdev = data;
2751
2752 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2753
2754 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2755 return -EBUSY;
2756
2757 if (blocked) {
2758 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2759 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2760 hci_dev_do_close(hdev);
2761 } else {
2762 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2763 }
2764
2765 return 0;
2766}
2767
2768static const struct rfkill_ops hci_rfkill_ops = {
2769 .set_block = hci_rfkill_set_block,
2770};
2771
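/* Power-on work: open the device and, if an error condition that was
 * ignored during setup (rfkill, missing address) still applies, power
 * it back off again; otherwise schedule the auto power-off timeout
 * while HCI_AUTO_OFF is set.
 */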
2772static void hci_power_on(struct work_struct *work)
2773{
2774 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2775 int err;
2776
2777 BT_DBG("%s", hdev->name);
2778
2779 err = hci_dev_do_open(hdev);
2780 if (err < 0) {
2781 mgmt_set_powered_failed(hdev, err);
2782 return;
2783 }
2784
2785 /* During the HCI setup phase, a few error conditions are
2786 * ignored and they need to be checked now. If they are still
2787 * valid, it is important to turn the device back off.
2788 */
2789 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2790 (hdev->dev_type == HCI_BREDR &&
2791 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2792 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2793 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2794 hci_dev_do_close(hdev);
2795 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2796 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2797 HCI_AUTO_OFF_TIMEOUT);
2798 }
2799
2800 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2801 mgmt_index_added(hdev);
2802}
2803
2804static void hci_power_off(struct work_struct *work)
2805{
2806 struct hci_dev *hdev = container_of(work, struct hci_dev,
2807 power_off.work);
2808
2809 BT_DBG("%s", hdev->name);
2810
2811 hci_dev_do_close(hdev);
2812}
2813
2814static void hci_discov_off(struct work_struct *work)
2815{
2816 struct hci_dev *hdev;
2817
2818 hdev = container_of(work, struct hci_dev, discov_off.work);
2819
2820 BT_DBG("%s", hdev->name);
2821
2822 mgmt_discoverable_timeout(hdev);
2823}
2824
2825void hci_uuids_clear(struct hci_dev *hdev)
2826{
2827 struct bt_uuid *uuid, *tmp;
2828
2829 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2830 list_del(&uuid->list);
2831 kfree(uuid);
2832 }
2833}
2834
2835void hci_link_keys_clear(struct hci_dev *hdev)
2836{
2837 struct list_head *p, *n;
2838
2839 list_for_each_safe(p, n, &hdev->link_keys) {
2840 struct link_key *key;
2841
2842 key = list_entry(p, struct link_key, list);
2843
2844 list_del(p);
2845 kfree(key);
2846 }
2847}
2848
2849void hci_smp_ltks_clear(struct hci_dev *hdev)
2850{
2851 struct smp_ltk *k, *tmp;
2852
2853 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2854 list_del(&k->list);
2855 kfree(k);
2856 }
2857}
2858
2859void hci_smp_irks_clear(struct hci_dev *hdev)
2860{
2861 struct smp_irk *k, *tmp;
2862
2863 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2864 list_del(&k->list);
2865 kfree(k);
2866 }
2867}
2868
2869struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2870{
2871 struct link_key *k;
2872
2873 list_for_each_entry(k, &hdev->link_keys, list)
2874 if (bacmp(bdaddr, &k->bdaddr) == 0)
2875 return k;
2876
2877 return NULL;
2878}
2879
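/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the authentication
 * requirements of both sides of the connection.
 */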
2880static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2881 u8 key_type, u8 old_key_type)
2882{
2883 /* Legacy key */
2884 if (key_type < 0x03)
2885 return true;
2886
2887 /* Debug keys are insecure so don't store them persistently */
2888 if (key_type == HCI_LK_DEBUG_COMBINATION)
2889 return false;
2890
2891 /* Changed combination key and there's no previous one */
2892 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2893 return false;
2894
2895 /* Security mode 3 case */
2896 if (!conn)
2897 return true;
2898
2899	/* Neither the local nor the remote side had no-bonding as a requirement */
2900 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2901 return true;
2902
2903 /* Local side had dedicated bonding as requirement */
2904 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2905 return true;
2906
2907 /* Remote side had dedicated bonding as requirement */
2908 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2909 return true;
2910
2911 /* If none of the above criteria match, then don't store the key
2912 * persistently */
2913 return false;
2914}
2915
2916static bool ltk_type_master(u8 type)
2917{
2918 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2919 return true;
2920
2921 return false;
2922}
2923
2924struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2925 bool master)
2926{
2927 struct smp_ltk *k;
2928
2929 list_for_each_entry(k, &hdev->long_term_keys, list) {
2930 if (k->ediv != ediv || k->rand != rand)
2931 continue;
2932
2933 if (ltk_type_master(k->type) != master)
2934 continue;
2935
2936 return k;
2937 }
2938
2939 return NULL;
2940}
2941
2942struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2943 u8 addr_type, bool master)
2944{
2945 struct smp_ltk *k;
2946
2947 list_for_each_entry(k, &hdev->long_term_keys, list)
2948 if (addr_type == k->bdaddr_type &&
2949 bacmp(bdaddr, &k->bdaddr) == 0 &&
2950 ltk_type_master(k->type) == master)
2951 return k;
2952
2953 return NULL;
2954}
2955
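/* Look up the IRK matching a Resolvable Private Address: first check
 * for a cached RPA match, then try to resolve the RPA against each
 * stored IRK, caching the RPA on success.
 */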
2956struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2957{
2958 struct smp_irk *irk;
2959
2960 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2961 if (!bacmp(&irk->rpa, rpa))
2962 return irk;
2963 }
2964
2965 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2966 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2967 bacpy(&irk->rpa, rpa);
2968 return irk;
2969 }
2970 }
2971
2972 return NULL;
2973}
2974
2975struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2976 u8 addr_type)
2977{
2978 struct smp_irk *irk;
2979
2980 /* Identity Address must be public or static random */
2981 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2982 return NULL;
2983
2984 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2985 if (addr_type == irk->addr_type &&
2986 bacmp(bdaddr, &irk->bdaddr) == 0)
2987 return irk;
2988 }
2989
2990 return NULL;
2991}
2992
2993int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2994 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2995{
2996 struct link_key *key, *old_key;
2997 u8 old_key_type;
2998 bool persistent;
2999
3000 old_key = hci_find_link_key(hdev, bdaddr);
3001 if (old_key) {
3002 old_key_type = old_key->type;
3003 key = old_key;
3004 } else {
3005 old_key_type = conn ? conn->key_type : 0xff;
3006 key = kzalloc(sizeof(*key), GFP_KERNEL);
3007 if (!key)
3008 return -ENOMEM;
3009 list_add(&key->list, &hdev->link_keys);
3010 }
3011
3012 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3013
3014 /* Some buggy controller combinations generate a changed
3015 * combination key for legacy pairing even when there's no
3016 * previous key */
3017 if (type == HCI_LK_CHANGED_COMBINATION &&
3018 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3019 type = HCI_LK_COMBINATION;
3020 if (conn)
3021 conn->key_type = type;
3022 }
3023
3024 bacpy(&key->bdaddr, bdaddr);
3025 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3026 key->pin_len = pin_len;
3027
3028 if (type == HCI_LK_CHANGED_COMBINATION)
3029 key->type = old_key_type;
3030 else
3031 key->type = type;
3032
3033 if (!new_key)
3034 return 0;
3035
3036 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3037
3038 mgmt_new_link_key(hdev, key, persistent);
3039
3040 if (conn)
3041 conn->flush_key = !persistent;
3042
3043 return 0;
3044}
3045
3046struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3047 u8 addr_type, u8 type, u8 authenticated,
3048 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3049{
3050 struct smp_ltk *key, *old_key;
3051 bool master = ltk_type_master(type);
3052
3053 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3054 if (old_key)
3055 key = old_key;
3056 else {
3057 key = kzalloc(sizeof(*key), GFP_KERNEL);
3058 if (!key)
3059 return NULL;
3060 list_add(&key->list, &hdev->long_term_keys);
3061 }
3062
3063 bacpy(&key->bdaddr, bdaddr);
3064 key->bdaddr_type = addr_type;
3065 memcpy(key->val, tk, sizeof(key->val));
3066 key->authenticated = authenticated;
3067 key->ediv = ediv;
3068 key->rand = rand;
3069 key->enc_size = enc_size;
3070 key->type = type;
3071
3072 return key;
3073}
3074
3075struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3076 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3077{
3078 struct smp_irk *irk;
3079
3080 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3081 if (!irk) {
3082 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3083 if (!irk)
3084 return NULL;
3085
3086 bacpy(&irk->bdaddr, bdaddr);
3087 irk->addr_type = addr_type;
3088
3089 list_add(&irk->list, &hdev->identity_resolving_keys);
3090 }
3091
3092 memcpy(irk->val, val, 16);
3093 bacpy(&irk->rpa, rpa);
3094
3095 return irk;
3096}
3097
3098int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3099{
3100 struct link_key *key;
3101
3102 key = hci_find_link_key(hdev, bdaddr);
3103 if (!key)
3104 return -ENOENT;
3105
3106 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3107
3108 list_del(&key->list);
3109 kfree(key);
3110
3111 return 0;
3112}
3113
3114int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3115{
3116 struct smp_ltk *k, *tmp;
3117 int removed = 0;
3118
3119 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3120 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3121 continue;
3122
3123 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3124
3125 list_del(&k->list);
3126 kfree(k);
3127 removed++;
3128 }
3129
3130 return removed ? 0 : -ENOENT;
3131}
3132
3133void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3134{
3135 struct smp_irk *k, *tmp;
3136
3137 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3138 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3139 continue;
3140
3141 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3142
3143 list_del(&k->list);
3144 kfree(k);
3145 }
3146}
3147
3148/* HCI command timer function */
3149static void hci_cmd_timeout(unsigned long arg)
3150{
3151 struct hci_dev *hdev = (void *) arg;
3152
3153 if (hdev->sent_cmd) {
3154 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3155 u16 opcode = __le16_to_cpu(sent->opcode);
3156
3157 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3158 } else {
3159 BT_ERR("%s command tx timeout", hdev->name);
3160 }
3161
3162 atomic_set(&hdev->cmd_cnt, 1);
3163 queue_work(hdev->workqueue, &hdev->cmd_work);
3164}
3165
3166struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3167 bdaddr_t *bdaddr)
3168{
3169 struct oob_data *data;
3170
3171 list_for_each_entry(data, &hdev->remote_oob_data, list)
3172 if (bacmp(bdaddr, &data->bdaddr) == 0)
3173 return data;
3174
3175 return NULL;
3176}
3177
3178int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3179{
3180 struct oob_data *data;
3181
3182 data = hci_find_remote_oob_data(hdev, bdaddr);
3183 if (!data)
3184 return -ENOENT;
3185
3186 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3187
3188 list_del(&data->list);
3189 kfree(data);
3190
3191 return 0;
3192}
3193
3194void hci_remote_oob_data_clear(struct hci_dev *hdev)
3195{
3196 struct oob_data *data, *n;
3197
3198 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3199 list_del(&data->list);
3200 kfree(data);
3201 }
3202}
3203
3204int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3205 u8 *hash, u8 *randomizer)
3206{
3207 struct oob_data *data;
3208
3209 data = hci_find_remote_oob_data(hdev, bdaddr);
3210 if (!data) {
3211 data = kmalloc(sizeof(*data), GFP_KERNEL);
3212 if (!data)
3213 return -ENOMEM;
3214
3215 bacpy(&data->bdaddr, bdaddr);
3216 list_add(&data->list, &hdev->remote_oob_data);
3217 }
3218
3219 memcpy(data->hash192, hash, sizeof(data->hash192));
3220 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3221
3222 memset(data->hash256, 0, sizeof(data->hash256));
3223 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3224
3225 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3226
3227 return 0;
3228}
3229
3230int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3231 u8 *hash192, u8 *randomizer192,
3232 u8 *hash256, u8 *randomizer256)
3233{
3234 struct oob_data *data;
3235
3236 data = hci_find_remote_oob_data(hdev, bdaddr);
3237 if (!data) {
3238 data = kmalloc(sizeof(*data), GFP_KERNEL);
3239 if (!data)
3240 return -ENOMEM;
3241
3242 bacpy(&data->bdaddr, bdaddr);
3243 list_add(&data->list, &hdev->remote_oob_data);
3244 }
3245
3246 memcpy(data->hash192, hash192, sizeof(data->hash192));
3247 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3248
3249 memcpy(data->hash256, hash256, sizeof(data->hash256));
3250 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3251
3252 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3253
3254 return 0;
3255}
3256
3257struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3258 bdaddr_t *bdaddr, u8 type)
3259{
3260 struct bdaddr_list *b;
3261
3262 list_for_each_entry(b, &hdev->blacklist, list) {
3263 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3264 return b;
3265 }
3266
3267 return NULL;
3268}
3269
3270static void hci_blacklist_clear(struct hci_dev *hdev)
3271{
3272 struct list_head *p, *n;
3273
3274 list_for_each_safe(p, n, &hdev->blacklist) {
3275 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3276
3277 list_del(p);
3278 kfree(b);
3279 }
3280}
3281
3282int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3283{
3284 struct bdaddr_list *entry;
3285
3286 if (!bacmp(bdaddr, BDADDR_ANY))
3287 return -EBADF;
3288
3289 if (hci_blacklist_lookup(hdev, bdaddr, type))
3290 return -EEXIST;
3291
3292 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3293 if (!entry)
3294 return -ENOMEM;
3295
3296 bacpy(&entry->bdaddr, bdaddr);
3297 entry->bdaddr_type = type;
3298
3299 list_add(&entry->list, &hdev->blacklist);
3300
3301 return mgmt_device_blocked(hdev, bdaddr, type);
3302}
3303
3304int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3305{
3306 struct bdaddr_list *entry;
3307
3308 if (!bacmp(bdaddr, BDADDR_ANY)) {
3309 hci_blacklist_clear(hdev);
3310 return 0;
3311 }
3312
3313 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3314 if (!entry)
3315 return -ENOENT;
3316
3317 list_del(&entry->list);
3318 kfree(entry);
3319
3320 return mgmt_device_unblocked(hdev, bdaddr, type);
3321}
3322
3323struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3324 bdaddr_t *bdaddr, u8 type)
3325{
3326 struct bdaddr_list *b;
3327
3328 list_for_each_entry(b, &hdev->le_white_list, list) {
3329 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3330 return b;
3331 }
3332
3333 return NULL;
3334}
3335
3336void hci_white_list_clear(struct hci_dev *hdev)
3337{
3338 struct list_head *p, *n;
3339
3340 list_for_each_safe(p, n, &hdev->le_white_list) {
3341 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3342
3343 list_del(p);
3344 kfree(b);
3345 }
3346}
3347
3348int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3349{
3350 struct bdaddr_list *entry;
3351
3352 if (!bacmp(bdaddr, BDADDR_ANY))
3353 return -EBADF;
3354
3355 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3356 if (!entry)
3357 return -ENOMEM;
3358
3359 bacpy(&entry->bdaddr, bdaddr);
3360 entry->bdaddr_type = type;
3361
3362 list_add(&entry->list, &hdev->le_white_list);
3363
3364 return 0;
3365}
3366
3367int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3368{
3369 struct bdaddr_list *entry;
3370
3371 if (!bacmp(bdaddr, BDADDR_ANY))
3372 return -EBADF;
3373
3374 entry = hci_white_list_lookup(hdev, bdaddr, type);
3375 if (!entry)
3376 return -ENOENT;
3377
3378 list_del(&entry->list);
3379 kfree(entry);
3380
3381 return 0;
3382}
3383
3384/* This function requires the caller holds hdev->lock */
3385struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3386 bdaddr_t *addr, u8 addr_type)
3387{
3388 struct hci_conn_params *params;
3389
3390 list_for_each_entry(params, &hdev->le_conn_params, list) {
3391		if (bacmp(&params->addr, addr) == 0 &&
3392 params->addr_type == addr_type) {
3393 return params;
3394 }
3395 }
3396
3397 return NULL;
3398}
3399
3400static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3401{
3402 struct hci_conn *conn;
3403
3404 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3405 if (!conn)
3406 return false;
3407
3408 if (conn->dst_type != type)
3409 return false;
3410
3411 if (conn->state != BT_CONNECTED)
3412 return false;
3413
3414 return true;
3415}
3416
3417static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3418{
3419 if (addr_type == ADDR_LE_DEV_PUBLIC)
3420 return true;
3421
3422 /* Check for Random Static address type */
3423 if ((addr->b[5] & 0xc0) == 0xc0)
3424 return true;
3425
3426 return false;
3427}
3428
3429/* This function requires the caller holds hdev->lock */
3430int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3431 u8 auto_connect, u16 conn_min_interval,
3432 u16 conn_max_interval)
3433{
3434 struct hci_conn_params *params;
3435
3436 if (!is_identity_address(addr, addr_type))
3437 return -EINVAL;
3438
3439 params = hci_conn_params_lookup(hdev, addr, addr_type);
3440 if (params)
3441 goto update;
3442
3443 params = kzalloc(sizeof(*params), GFP_KERNEL);
3444 if (!params) {
3445 BT_ERR("Out of memory");
3446 return -ENOMEM;
3447 }
3448
3449	bacpy(&params->addr, addr);
3450 params->addr_type = addr_type;
3451
3452	list_add(&params->list, &hdev->le_conn_params);
3453
3454update:
3455 params->conn_min_interval = conn_min_interval;
3456 params->conn_max_interval = conn_max_interval;
3457 params->auto_connect = auto_connect;
3458
3459 switch (auto_connect) {
3460 case HCI_AUTO_CONN_DISABLED:
3461 case HCI_AUTO_CONN_LINK_LOSS:
3462 hci_pend_le_conn_del(hdev, addr, addr_type);
3463 break;
3464 case HCI_AUTO_CONN_ALWAYS:
3465 if (!is_connected(hdev, addr, addr_type))
3466 hci_pend_le_conn_add(hdev, addr, addr_type);
3467 break;
3468 }
3469
3470 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3471 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3472 conn_min_interval, conn_max_interval);
3473
3474 return 0;
3475}
3476
3477/* This function requires the caller holds hdev->lock */
3478void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3479{
3480 struct hci_conn_params *params;
3481
3482 params = hci_conn_params_lookup(hdev, addr, addr_type);
3483 if (!params)
3484 return;
3485
3486 hci_pend_le_conn_del(hdev, addr, addr_type);
3487
3488	list_del(&params->list);
3489 kfree(params);
3490
3491 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3492}
3493
3494/* This function requires the caller holds hdev->lock */
3495void hci_conn_params_clear(struct hci_dev *hdev)
3496{
3497 struct hci_conn_params *params, *tmp;
3498
3499 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3500		list_del(&params->list);
3501 kfree(params);
3502 }
3503
3504 BT_DBG("All LE connection parameters were removed");
3505}
3506
3507/* This function requires the caller holds hdev->lock */
3508struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3509 bdaddr_t *addr, u8 addr_type)
3510{
3511 struct bdaddr_list *entry;
3512
3513 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3514 if (bacmp(&entry->bdaddr, addr) == 0 &&
3515 entry->bdaddr_type == addr_type)
3516 return entry;
3517 }
3518
3519 return NULL;
3520}
3521
3522/* This function requires the caller holds hdev->lock */
3523void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3524{
3525 struct bdaddr_list *entry;
3526
3527 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3528 if (entry)
3529 goto done;
3530
3531 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3532 if (!entry) {
3533 BT_ERR("Out of memory");
3534 return;
3535 }
3536
3537 bacpy(&entry->bdaddr, addr);
3538 entry->bdaddr_type = addr_type;
3539
3540 list_add(&entry->list, &hdev->pend_le_conns);
3541
3542 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3543
3544done:
3545 hci_update_background_scan(hdev);
3546}
3547
3548/* This function requires the caller holds hdev->lock */
3549void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3550{
3551 struct bdaddr_list *entry;
3552
3553 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3554 if (!entry)
3555 goto done;
3556
3557 list_del(&entry->list);
3558 kfree(entry);
3559
3560 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3561
3562done:
3563 hci_update_background_scan(hdev);
3564}
3565
3566/* This function requires the caller holds hdev->lock */
3567void hci_pend_le_conns_clear(struct hci_dev *hdev)
3568{
3569 struct bdaddr_list *entry, *tmp;
3570
3571 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3572 list_del(&entry->list);
3573 kfree(entry);
3574 }
3575
3576 BT_DBG("All LE pending connections cleared");
3577}
3578
3579static void inquiry_complete(struct hci_dev *hdev, u8 status)
3580{
3581 if (status) {
3582 BT_ERR("Failed to start inquiry: status %d", status);
3583
3584 hci_dev_lock(hdev);
3585 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3586 hci_dev_unlock(hdev);
3587 return;
3588 }
3589}
3590
3591static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3592{
3593 /* General inquiry access code (GIAC) */
3594 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3595 struct hci_request req;
3596 struct hci_cp_inquiry cp;
3597 int err;
3598
3599 if (status) {
3600 BT_ERR("Failed to disable LE scanning: status %d", status);
3601 return;
3602 }
3603
3604 switch (hdev->discovery.type) {
3605 case DISCOV_TYPE_LE:
3606 hci_dev_lock(hdev);
3607 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3608 hci_dev_unlock(hdev);
3609 break;
3610
3611 case DISCOV_TYPE_INTERLEAVED:
3612 hci_req_init(&req, hdev);
3613
3614 memset(&cp, 0, sizeof(cp));
3615 memcpy(&cp.lap, lap, sizeof(cp.lap));
3616 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3617 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3618
3619 hci_dev_lock(hdev);
3620
3621 hci_inquiry_cache_flush(hdev);
3622
3623 err = hci_req_run(&req, inquiry_complete);
3624 if (err) {
3625 BT_ERR("Inquiry request failed: err %d", err);
3626 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3627 }
3628
3629 hci_dev_unlock(hdev);
3630 break;
3631 }
3632}
3633
3634static void le_scan_disable_work(struct work_struct *work)
3635{
3636 struct hci_dev *hdev = container_of(work, struct hci_dev,
3637 le_scan_disable.work);
3638 struct hci_request req;
3639 int err;
3640
3641 BT_DBG("%s", hdev->name);
3642
3643 hci_req_init(&req, hdev);
3644
3645 hci_req_add_le_scan_disable(&req);
3646
3647 err = hci_req_run(&req, le_scan_disable_work_complete);
3648 if (err)
3649 BT_ERR("Disable LE scanning request failed: err %d", err);
3650}
3651
3652static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3653{
3654 struct hci_dev *hdev = req->hdev;
3655
3656 /* If we're advertising or initiating an LE connection we can't
3657 * go ahead and change the random address at this time. This is
3658 * because the eventual initiator address used for the
3659 * subsequently created connection will be undefined (some
3660 * controllers use the new address and others the one we had
3661 * when the operation started).
3662 *
3663 * In this kind of scenario skip the update and let the random
3664 * address be updated at the next cycle.
3665 */
3666 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3667 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3668 BT_DBG("Deferring random address update");
3669 return;
3670 }
3671
3672 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3673}
3674
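/* Queue the HCI commands needed to set a suitable own address for the
 * next LE operation and report its type via @own_addr_type: an RPA
 * when privacy is enabled, an unresolvable private address when
 * privacy is required without an RPA, the static address when no
 * public address is available (or its use is forced), and the public
 * address otherwise.
 */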
3675int hci_update_random_address(struct hci_request *req, bool require_privacy,
3676 u8 *own_addr_type)
3677{
3678 struct hci_dev *hdev = req->hdev;
3679 int err;
3680
3681	/* If privacy is enabled, use a resolvable private address. If the
3682	 * current RPA has expired or something other than the current RPA
3683	 * is in use, generate a new one.
3684 */
3685 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3686 int to;
3687
3688 *own_addr_type = ADDR_LE_DEV_RANDOM;
3689
3690 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3691 !bacmp(&hdev->random_addr, &hdev->rpa))
3692 return 0;
3693
3694 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3695 if (err < 0) {
3696 BT_ERR("%s failed to generate new RPA", hdev->name);
3697 return err;
3698 }
3699
3700 set_random_addr(req, &hdev->rpa);
3701
3702 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3703 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3704
3705 return 0;
3706 }
3707
3708	/* In case of required privacy without a resolvable private address,
3709 * use an unresolvable private address. This is useful for active
3710 * scanning and non-connectable advertising.
3711 */
3712 if (require_privacy) {
3713 bdaddr_t urpa;
3714
3715 get_random_bytes(&urpa, 6);
3716 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3717
3718 *own_addr_type = ADDR_LE_DEV_RANDOM;
3719 set_random_addr(req, &urpa);
3720 return 0;
3721 }
3722
3723	/* If forcing the static address is in use or there is no public
3724	 * address, use the static address as the random address (but skip
3725	 * the HCI command if the current random address is already the
3726	 * static one).
3727 */
3728 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3729 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3730 *own_addr_type = ADDR_LE_DEV_RANDOM;
3731 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3732 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3733 &hdev->static_addr);
3734 return 0;
3735 }
3736
3737	/* Neither privacy nor a static address is in use, so use a
3738	 * public address.
3739 */
3740 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3741
3742 return 0;
3743}
3744
3745/* Copy the Identity Address of the controller.
3746 *
3747 * If the controller has a public BD_ADDR, then by default use that one.
3748 * If this is a LE only controller without a public address, default to
3749 * the static random address.
3750 *
3751 * For debugging purposes it is possible to force controllers with a
3752 * public address to use the static random address instead.
3753 */
3754void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3755 u8 *bdaddr_type)
3756{
3757 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3758 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3759 bacpy(bdaddr, &hdev->static_addr);
3760 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3761 } else {
3762 bacpy(bdaddr, &hdev->bdaddr);
3763 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3764 }
3765}
3766
3767/* Alloc HCI device */
3768struct hci_dev *hci_alloc_dev(void)
3769{
3770 struct hci_dev *hdev;
3771
3772 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3773 if (!hdev)
3774 return NULL;
3775
3776 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3777 hdev->esco_type = (ESCO_HV1);
3778 hdev->link_mode = (HCI_LM_ACCEPT);
3779 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3780 hdev->io_capability = 0x03; /* No Input No Output */
3781 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3782 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3783
3784 hdev->sniff_max_interval = 800;
3785 hdev->sniff_min_interval = 80;
3786
3787 hdev->le_adv_channel_map = 0x07;
3788 hdev->le_scan_interval = 0x0060;
3789 hdev->le_scan_window = 0x0030;
3790 hdev->le_conn_min_interval = 0x0028;
3791 hdev->le_conn_max_interval = 0x0038;
3792
3793 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3794
3795 mutex_init(&hdev->lock);
3796 mutex_init(&hdev->req_lock);
3797
3798 INIT_LIST_HEAD(&hdev->mgmt_pending);
3799 INIT_LIST_HEAD(&hdev->blacklist);
3800 INIT_LIST_HEAD(&hdev->uuids);
3801 INIT_LIST_HEAD(&hdev->link_keys);
3802 INIT_LIST_HEAD(&hdev->long_term_keys);
3803 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3804 INIT_LIST_HEAD(&hdev->remote_oob_data);
3805 INIT_LIST_HEAD(&hdev->le_white_list);
3806 INIT_LIST_HEAD(&hdev->le_conn_params);
3807 INIT_LIST_HEAD(&hdev->pend_le_conns);
3808 INIT_LIST_HEAD(&hdev->conn_hash.list);
3809
3810 INIT_WORK(&hdev->rx_work, hci_rx_work);
3811 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3812 INIT_WORK(&hdev->tx_work, hci_tx_work);
3813 INIT_WORK(&hdev->power_on, hci_power_on);
3814
3815 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3816 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3817 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3818
3819 skb_queue_head_init(&hdev->rx_q);
3820 skb_queue_head_init(&hdev->cmd_q);
3821 skb_queue_head_init(&hdev->raw_q);
3822
3823 init_waitqueue_head(&hdev->req_wait_q);
3824
3825 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3826
3827 hci_init_sysfs(hdev);
3828 discovery_init(hdev);
3829
3830 return hdev;
3831}
3832EXPORT_SYMBOL(hci_alloc_dev);
3833
3834/* Free HCI device */
3835void hci_free_dev(struct hci_dev *hdev)
3836{
3837	/* will be freed via the device release function */
3838 put_device(&hdev->dev);
3839}
3840EXPORT_SYMBOL(hci_free_dev);
3841
3842/* Register HCI device */
3843int hci_register_dev(struct hci_dev *hdev)
3844{
3845 int id, error;
3846
3847 if (!hdev->open || !hdev->close)
3848 return -EINVAL;
3849
3850 /* Do not allow HCI_AMP devices to register at index 0,
3851 * so the index can be used as the AMP controller ID.
3852 */
3853 switch (hdev->dev_type) {
3854 case HCI_BREDR:
3855 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3856 break;
3857 case HCI_AMP:
3858 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3859 break;
3860 default:
3861 return -EINVAL;
3862 }
3863
3864 if (id < 0)
3865 return id;
3866
3867 sprintf(hdev->name, "hci%d", id);
3868 hdev->id = id;
3869
3870 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3871
3872 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3873 WQ_MEM_RECLAIM, 1, hdev->name);
3874 if (!hdev->workqueue) {
3875 error = -ENOMEM;
3876 goto err;
3877 }
3878
3879 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3880 WQ_MEM_RECLAIM, 1, hdev->name);
3881 if (!hdev->req_workqueue) {
3882 destroy_workqueue(hdev->workqueue);
3883 error = -ENOMEM;
3884 goto err;
3885 }
3886
3887 if (!IS_ERR_OR_NULL(bt_debugfs))
3888 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3889
3890 dev_set_name(&hdev->dev, "%s", hdev->name);
3891
3892 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3893 CRYPTO_ALG_ASYNC);
3894 if (IS_ERR(hdev->tfm_aes)) {
3895 BT_ERR("Unable to create crypto context");
3896 error = PTR_ERR(hdev->tfm_aes);
3897 hdev->tfm_aes = NULL;
3898 goto err_wqueue;
3899 }
3900
3901 error = device_add(&hdev->dev);
3902 if (error < 0)
3903 goto err_tfm;
3904
3905 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3906 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3907 hdev);
3908 if (hdev->rfkill) {
3909 if (rfkill_register(hdev->rfkill) < 0) {
3910 rfkill_destroy(hdev->rfkill);
3911 hdev->rfkill = NULL;
3912 }
3913 }
3914
3915 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3916 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3917
3918 set_bit(HCI_SETUP, &hdev->dev_flags);
3919 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3920
3921 if (hdev->dev_type == HCI_BREDR) {
3922 /* Assume BR/EDR support until proven otherwise (such as
3923		 * through reading supported features during init).
3924 */
3925 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3926 }
3927
3928 write_lock(&hci_dev_list_lock);
3929 list_add(&hdev->list, &hci_dev_list);
3930 write_unlock(&hci_dev_list_lock);
3931
3932 hci_notify(hdev, HCI_DEV_REG);
3933 hci_dev_hold(hdev);
3934
3935 queue_work(hdev->req_workqueue, &hdev->power_on);
3936
3937 return id;
3938
3939err_tfm:
3940 crypto_free_blkcipher(hdev->tfm_aes);
3941err_wqueue:
3942 destroy_workqueue(hdev->workqueue);
3943 destroy_workqueue(hdev->req_workqueue);
3944err:
3945 ida_simple_remove(&hci_index_ida, hdev->id);
3946
3947 return error;
3948}
3949EXPORT_SYMBOL(hci_register_dev);
3950
3951/* Unregister HCI device */
3952void hci_unregister_dev(struct hci_dev *hdev)
3953{
3954 int i, id;
3955
3956 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3957
3958 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3959
3960 id = hdev->id;
3961
3962 write_lock(&hci_dev_list_lock);
3963 list_del(&hdev->list);
3964 write_unlock(&hci_dev_list_lock);
3965
3966 hci_dev_do_close(hdev);
3967
3968 for (i = 0; i < NUM_REASSEMBLY; i++)
3969 kfree_skb(hdev->reassembly[i]);
3970
3971 cancel_work_sync(&hdev->power_on);
3972
3973 if (!test_bit(HCI_INIT, &hdev->flags) &&
3974 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3975 hci_dev_lock(hdev);
3976 mgmt_index_removed(hdev);
3977 hci_dev_unlock(hdev);
3978 }
3979
3980 /* mgmt_index_removed should take care of emptying the
3981 * pending list */
3982 BUG_ON(!list_empty(&hdev->mgmt_pending));
3983
3984 hci_notify(hdev, HCI_DEV_UNREG);
3985
3986 if (hdev->rfkill) {
3987 rfkill_unregister(hdev->rfkill);
3988 rfkill_destroy(hdev->rfkill);
3989 }
3990
3991 if (hdev->tfm_aes)
3992 crypto_free_blkcipher(hdev->tfm_aes);
3993
3994 device_del(&hdev->dev);
3995
3996 debugfs_remove_recursive(hdev->debugfs);
3997
3998 destroy_workqueue(hdev->workqueue);
3999 destroy_workqueue(hdev->req_workqueue);
4000
4001 hci_dev_lock(hdev);
4002 hci_blacklist_clear(hdev);
4003 hci_uuids_clear(hdev);
4004 hci_link_keys_clear(hdev);
4005 hci_smp_ltks_clear(hdev);
4006 hci_smp_irks_clear(hdev);
4007 hci_remote_oob_data_clear(hdev);
4008 hci_white_list_clear(hdev);
4009 hci_conn_params_clear(hdev);
4010 hci_pend_le_conns_clear(hdev);
4011 hci_dev_unlock(hdev);
4012
4013 hci_dev_put(hdev);
4014
4015 ida_simple_remove(&hci_index_ida, id);
4016}
4017EXPORT_SYMBOL(hci_unregister_dev);
4018
4019/* Suspend HCI device */
4020int hci_suspend_dev(struct hci_dev *hdev)
4021{
4022 hci_notify(hdev, HCI_DEV_SUSPEND);
4023 return 0;
4024}
4025EXPORT_SYMBOL(hci_suspend_dev);
4026
4027/* Resume HCI device */
4028int hci_resume_dev(struct hci_dev *hdev)
4029{
4030 hci_notify(hdev, HCI_DEV_RESUME);
4031 return 0;
4032}
4033EXPORT_SYMBOL(hci_resume_dev);
4034
4035/* Receive frame from HCI drivers */
4036int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4037{
4038 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4039 && !test_bit(HCI_INIT, &hdev->flags))) {
4040 kfree_skb(skb);
4041 return -ENXIO;
4042 }
4043
4044 /* Incoming skb */
4045 bt_cb(skb)->incoming = 1;
4046
4047 /* Time stamp */
4048 __net_timestamp(skb);
4049
4050 skb_queue_tail(&hdev->rx_q, skb);
4051 queue_work(hdev->workqueue, &hdev->rx_work);
4052
4053 return 0;
4054}
4055EXPORT_SYMBOL(hci_recv_frame);
4056
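/* Reassemble an HCI packet of @type from a driver-provided byte
 * stream. Data is accumulated in hdev->reassembly[@index] until a
 * complete frame is available, which is then handed to
 * hci_recv_frame(). Returns the number of bytes left unprocessed or a
 * negative error.
 */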
4057static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4058 int count, __u8 index)
4059{
4060 int len = 0;
4061 int hlen = 0;
4062 int remain = count;
4063 struct sk_buff *skb;
4064 struct bt_skb_cb *scb;
4065
4066 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4067 index >= NUM_REASSEMBLY)
4068 return -EILSEQ;
4069
4070 skb = hdev->reassembly[index];
4071
4072 if (!skb) {
4073 switch (type) {
4074 case HCI_ACLDATA_PKT:
4075 len = HCI_MAX_FRAME_SIZE;
4076 hlen = HCI_ACL_HDR_SIZE;
4077 break;
4078 case HCI_EVENT_PKT:
4079 len = HCI_MAX_EVENT_SIZE;
4080 hlen = HCI_EVENT_HDR_SIZE;
4081 break;
4082 case HCI_SCODATA_PKT:
4083 len = HCI_MAX_SCO_SIZE;
4084 hlen = HCI_SCO_HDR_SIZE;
4085 break;
4086 }
4087
4088 skb = bt_skb_alloc(len, GFP_ATOMIC);
4089 if (!skb)
4090 return -ENOMEM;
4091
4092 scb = (void *) skb->cb;
4093 scb->expect = hlen;
4094 scb->pkt_type = type;
4095
4096 hdev->reassembly[index] = skb;
4097 }
4098
4099 while (count) {
4100 scb = (void *) skb->cb;
4101 len = min_t(uint, scb->expect, count);
4102
4103 memcpy(skb_put(skb, len), data, len);
4104
4105 count -= len;
4106 data += len;
4107 scb->expect -= len;
4108 remain = count;
4109
4110 switch (type) {
4111 case HCI_EVENT_PKT:
4112 if (skb->len == HCI_EVENT_HDR_SIZE) {
4113 struct hci_event_hdr *h = hci_event_hdr(skb);
4114 scb->expect = h->plen;
4115
4116 if (skb_tailroom(skb) < scb->expect) {
4117 kfree_skb(skb);
4118 hdev->reassembly[index] = NULL;
4119 return -ENOMEM;
4120 }
4121 }
4122 break;
4123
4124 case HCI_ACLDATA_PKT:
4125 if (skb->len == HCI_ACL_HDR_SIZE) {
4126 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4127 scb->expect = __le16_to_cpu(h->dlen);
4128
4129 if (skb_tailroom(skb) < scb->expect) {
4130 kfree_skb(skb);
4131 hdev->reassembly[index] = NULL;
4132 return -ENOMEM;
4133 }
4134 }
4135 break;
4136
4137 case HCI_SCODATA_PKT:
4138 if (skb->len == HCI_SCO_HDR_SIZE) {
4139 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4140 scb->expect = h->dlen;
4141
4142 if (skb_tailroom(skb) < scb->expect) {
4143 kfree_skb(skb);
4144 hdev->reassembly[index] = NULL;
4145 return -ENOMEM;
4146 }
4147 }
4148 break;
4149 }
4150
4151 if (scb->expect == 0) {
4152 /* Complete frame */
4153
4154 bt_cb(skb)->pkt_type = type;
4155 hci_recv_frame(hdev, skb);
4156
4157 hdev->reassembly[index] = NULL;
4158 return remain;
4159 }
4160 }
4161
4162 return remain;
4163}
4164
4165int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4166{
4167 int rem = 0;
4168
4169 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4170 return -EILSEQ;
4171
4172 while (count) {
4173 rem = hci_reassembly(hdev, type, data, count, type - 1);
4174 if (rem < 0)
4175 return rem;
4176
4177 data += (count - rem);
4178 count = rem;
4179 }
4180
4181 return rem;
4182}
4183EXPORT_SYMBOL(hci_recv_fragment);
4184
4185#define STREAM_REASSEMBLY 0
4186
4187int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4188{
4189 int type;
4190 int rem = 0;
4191
4192 while (count) {
4193 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4194
4195 if (!skb) {
4196 struct { char type; } *pkt;
4197
4198 /* Start of the frame */
4199 pkt = data;
4200 type = pkt->type;
4201
4202 data++;
4203 count--;
4204 } else
4205 type = bt_cb(skb)->pkt_type;
4206
4207 rem = hci_reassembly(hdev, type, data, count,
4208 STREAM_REASSEMBLY);
4209 if (rem < 0)
4210 return rem;
4211
4212 data += (count - rem);
4213 count = rem;
4214 }
4215
4216 return rem;
4217}
4218EXPORT_SYMBOL(hci_recv_stream_fragment);
4219
4220/* ---- Interface to upper protocols ---- */
4221
4222int hci_register_cb(struct hci_cb *cb)
4223{
4224 BT_DBG("%p name %s", cb, cb->name);
4225
4226 write_lock(&hci_cb_list_lock);
4227 list_add(&cb->list, &hci_cb_list);
4228 write_unlock(&hci_cb_list_lock);
4229
4230 return 0;
4231}
4232EXPORT_SYMBOL(hci_register_cb);
4233
4234int hci_unregister_cb(struct hci_cb *cb)
4235{
4236 BT_DBG("%p name %s", cb, cb->name);
4237
4238 write_lock(&hci_cb_list_lock);
4239 list_del(&cb->list);
4240 write_unlock(&hci_cb_list_lock);
4241
4242 return 0;
4243}
4244EXPORT_SYMBOL(hci_unregister_cb);
4245
4246static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4247{
4248 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4249
4250 /* Time stamp */
4251 __net_timestamp(skb);
4252
4253 /* Send copy to monitor */
4254 hci_send_to_monitor(hdev, skb);
4255
4256 if (atomic_read(&hdev->promisc)) {
4257 /* Send copy to the sockets */
4258 hci_send_to_sock(hdev, skb);
4259 }
4260
4261 /* Get rid of skb owner, prior to sending to the driver. */
4262 skb_orphan(skb);
4263
4264 if (hdev->send(hdev, skb) < 0)
4265 BT_ERR("%s sending frame failed", hdev->name);
4266}
4267
4268void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4269{
4270 skb_queue_head_init(&req->cmd_q);
4271 req->hdev = hdev;
4272 req->err = 0;
4273}
4274
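/* Queue all commands collected in @req on the device's command queue
 * and kick the command work. The @complete callback is attached to the
 * last command so that it runs once the whole request has finished.
 */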
4275int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4276{
4277 struct hci_dev *hdev = req->hdev;
4278 struct sk_buff *skb;
4279 unsigned long flags;
4280
4281 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4282
4283	/* If an error occurred during request building, remove all HCI
4284 * commands queued on the HCI request queue.
4285 */
4286 if (req->err) {
4287 skb_queue_purge(&req->cmd_q);
4288 return req->err;
4289 }
4290
4291 /* Do not allow empty requests */
4292 if (skb_queue_empty(&req->cmd_q))
4293 return -ENODATA;
4294
4295 skb = skb_peek_tail(&req->cmd_q);
4296 bt_cb(skb)->req.complete = complete;
4297
4298 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4299 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4300 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4301
4302 queue_work(hdev->workqueue, &hdev->cmd_work);
4303
4304 return 0;
4305}
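
/* A typical request is built and run in three steps; compare
 * hci_update_background_scan() near the end of this file. The command,
 * parameter and callback names below are only illustrative:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, complete_callback);
 *
 * All queued commands are spliced onto hdev->cmd_q as one unit and the
 * completion callback is attached to the last command of the request.
 */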
4306
4307static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4308 u32 plen, const void *param)
4309{
4310 int len = HCI_COMMAND_HDR_SIZE + plen;
4311 struct hci_command_hdr *hdr;
4312 struct sk_buff *skb;
4313
4314 skb = bt_skb_alloc(len, GFP_ATOMIC);
4315 if (!skb)
4316 return NULL;
4317
4318 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4319 hdr->opcode = cpu_to_le16(opcode);
4320 hdr->plen = plen;
4321
4322 if (plen)
4323 memcpy(skb_put(skb, plen), param, plen);
4324
4325 BT_DBG("skb len %d", skb->len);
4326
4327 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4328
4329 return skb;
4330}
4331
4332/* Send HCI command */
4333int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4334 const void *param)
4335{
4336 struct sk_buff *skb;
4337
4338 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4339
4340 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4341 if (!skb) {
4342 BT_ERR("%s no memory for command", hdev->name);
4343 return -ENOMEM;
4344 }
4345
4346	/* Stand-alone HCI commands must be flagged as
4347 * single-command requests.
4348 */
4349 bt_cb(skb)->req.start = true;
4350
4351 skb_queue_tail(&hdev->cmd_q, skb);
4352 queue_work(hdev->workqueue, &hdev->cmd_work);
4353
4354 return 0;
4355}
4356
4357/* Queue a command to an asynchronous HCI request */
4358void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4359 const void *param, u8 event)
4360{
4361 struct hci_dev *hdev = req->hdev;
4362 struct sk_buff *skb;
4363
4364 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4365
4366	/* If an error occurred during request building, there is no point in
4367 * queueing the HCI command. We can simply return.
4368 */
4369 if (req->err)
4370 return;
4371
4372 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4373 if (!skb) {
4374 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4375 hdev->name, opcode);
4376 req->err = -ENOMEM;
4377 return;
4378 }
4379
4380 if (skb_queue_empty(&req->cmd_q))
4381 bt_cb(skb)->req.start = true;
4382
4383 bt_cb(skb)->req.event = event;
4384
4385 skb_queue_tail(&req->cmd_q, skb);
4386}
4387
4388void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4389 const void *param)
4390{
4391 hci_req_add_ev(req, opcode, plen, param, 0);
4392}
4393
4394/* Get data from the previously sent command */
4395void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4396{
4397 struct hci_command_hdr *hdr;
4398
4399 if (!hdev->sent_cmd)
4400 return NULL;
4401
4402 hdr = (void *) hdev->sent_cmd->data;
4403
4404 if (hdr->opcode != cpu_to_le16(opcode))
4405 return NULL;
4406
4407 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4408
4409 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4410}
4411
4412/* Send ACL data */
4413static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4414{
4415 struct hci_acl_hdr *hdr;
4416 int len = skb->len;
4417
4418 skb_push(skb, HCI_ACL_HDR_SIZE);
4419 skb_reset_transport_header(skb);
4420 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4421 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4422 hdr->dlen = cpu_to_le16(len);
4423}
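
/* The resulting ACL data header consists of two little-endian 16-bit
 * fields: the connection handle in the low 12 bits with the packet
 * boundary and broadcast flags packed above it (hci_handle_pack()),
 * followed by the payload length. hci_queue_acl() below rewrites the
 * flags to ACL_CONT for every fragment after the first.
 */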
4424
4425static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4426 struct sk_buff *skb, __u16 flags)
4427{
4428 struct hci_conn *conn = chan->conn;
4429 struct hci_dev *hdev = conn->hdev;
4430 struct sk_buff *list;
4431
4432 skb->len = skb_headlen(skb);
4433 skb->data_len = 0;
4434
4435 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4436
4437 switch (hdev->dev_type) {
4438 case HCI_BREDR:
4439 hci_add_acl_hdr(skb, conn->handle, flags);
4440 break;
4441 case HCI_AMP:
4442 hci_add_acl_hdr(skb, chan->handle, flags);
4443 break;
4444 default:
4445 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4446 return;
4447 }
4448
4449 list = skb_shinfo(skb)->frag_list;
4450 if (!list) {
4451 /* Non fragmented */
4452 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4453
4454 skb_queue_tail(queue, skb);
4455 } else {
4456 /* Fragmented */
4457 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4458
4459 skb_shinfo(skb)->frag_list = NULL;
4460
4461 /* Queue all fragments atomically */
4462 spin_lock(&queue->lock);
4463
4464 __skb_queue_tail(queue, skb);
4465
4466 flags &= ~ACL_START;
4467 flags |= ACL_CONT;
4468 do {
4469 skb = list; list = list->next;
4470
4471 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4472 hci_add_acl_hdr(skb, conn->handle, flags);
4473
4474 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4475
4476 __skb_queue_tail(queue, skb);
4477 } while (list);
4478
4479 spin_unlock(&queue->lock);
4480 }
4481}
4482
4483void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4484{
4485 struct hci_dev *hdev = chan->conn->hdev;
4486
4487 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4488
4489 hci_queue_acl(chan, &chan->data_q, skb, flags);
4490
4491 queue_work(hdev->workqueue, &hdev->tx_work);
4492}
4493
4494/* Send SCO data */
4495void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4496{
4497 struct hci_dev *hdev = conn->hdev;
4498 struct hci_sco_hdr hdr;
4499
4500 BT_DBG("%s len %d", hdev->name, skb->len);
4501
4502 hdr.handle = cpu_to_le16(conn->handle);
4503 hdr.dlen = skb->len;
4504
4505 skb_push(skb, HCI_SCO_HDR_SIZE);
4506 skb_reset_transport_header(skb);
4507 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4508
4509 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4510
4511 skb_queue_tail(&conn->data_q, skb);
4512 queue_work(hdev->workqueue, &hdev->tx_work);
4513}
4514
4515/* ---- HCI TX task (outgoing data) ---- */
4516
4517/* HCI Connection scheduler */
4518static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4519 int *quote)
4520{
4521 struct hci_conn_hash *h = &hdev->conn_hash;
4522 struct hci_conn *conn = NULL, *c;
4523 unsigned int num = 0, min = ~0;
4524
4525	/* We don't have to lock the device here. Connections are always
4526 * added and removed with TX task disabled. */
4527
4528 rcu_read_lock();
4529
4530 list_for_each_entry_rcu(c, &h->list, list) {
4531 if (c->type != type || skb_queue_empty(&c->data_q))
4532 continue;
4533
4534 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4535 continue;
4536
4537 num++;
4538
4539 if (c->sent < min) {
4540 min = c->sent;
4541 conn = c;
4542 }
4543
4544 if (hci_conn_num(hdev, type) == num)
4545 break;
4546 }
4547
4548 rcu_read_unlock();
4549
4550 if (conn) {
4551 int cnt, q;
4552
4553 switch (conn->type) {
4554 case ACL_LINK:
4555 cnt = hdev->acl_cnt;
4556 break;
4557 case SCO_LINK:
4558 case ESCO_LINK:
4559 cnt = hdev->sco_cnt;
4560 break;
4561 case LE_LINK:
4562 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4563 break;
4564 default:
4565 cnt = 0;
4566 BT_ERR("Unknown link type");
4567 }
4568
4569 q = cnt / num;
4570 *quote = q ? q : 1;
4571 } else
4572 *quote = 0;
4573
4574 BT_DBG("conn %p quote %d", conn, *quote);
4575 return conn;
4576}
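
/* Illustrative example (numbers made up): with hdev->acl_cnt == 8 free
 * controller buffers and three ACL connections holding queued data, the
 * connection with the lowest 'sent' count is picked and handed a quote
 * of 8 / 3 = 2 packets for this round; the quote never drops below 1,
 * so a single free buffer still keeps traffic moving.
 */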
4577
4578static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4579{
4580 struct hci_conn_hash *h = &hdev->conn_hash;
4581 struct hci_conn *c;
4582
4583 BT_ERR("%s link tx timeout", hdev->name);
4584
4585 rcu_read_lock();
4586
4587 /* Kill stalled connections */
4588 list_for_each_entry_rcu(c, &h->list, list) {
4589 if (c->type == type && c->sent) {
4590 BT_ERR("%s killing stalled connection %pMR",
4591 hdev->name, &c->dst);
4592 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4593 }
4594 }
4595
4596 rcu_read_unlock();
4597}
4598
4599static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4600 int *quote)
4601{
4602 struct hci_conn_hash *h = &hdev->conn_hash;
4603 struct hci_chan *chan = NULL;
4604 unsigned int num = 0, min = ~0, cur_prio = 0;
4605 struct hci_conn *conn;
4606 int cnt, q, conn_num = 0;
4607
4608 BT_DBG("%s", hdev->name);
4609
4610 rcu_read_lock();
4611
4612 list_for_each_entry_rcu(conn, &h->list, list) {
4613 struct hci_chan *tmp;
4614
4615 if (conn->type != type)
4616 continue;
4617
4618 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4619 continue;
4620
4621 conn_num++;
4622
4623 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4624 struct sk_buff *skb;
4625
4626 if (skb_queue_empty(&tmp->data_q))
4627 continue;
4628
4629 skb = skb_peek(&tmp->data_q);
4630 if (skb->priority < cur_prio)
4631 continue;
4632
4633 if (skb->priority > cur_prio) {
4634 num = 0;
4635 min = ~0;
4636 cur_prio = skb->priority;
4637 }
4638
4639 num++;
4640
4641 if (conn->sent < min) {
4642 min = conn->sent;
4643 chan = tmp;
4644 }
4645 }
4646
4647 if (hci_conn_num(hdev, type) == conn_num)
4648 break;
4649 }
4650
4651 rcu_read_unlock();
4652
4653 if (!chan)
4654 return NULL;
4655
4656 switch (chan->conn->type) {
4657 case ACL_LINK:
4658 cnt = hdev->acl_cnt;
4659 break;
4660 case AMP_LINK:
4661 cnt = hdev->block_cnt;
4662 break;
4663 case SCO_LINK:
4664 case ESCO_LINK:
4665 cnt = hdev->sco_cnt;
4666 break;
4667 case LE_LINK:
4668 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4669 break;
4670 default:
4671 cnt = 0;
4672 BT_ERR("Unknown link type");
4673 }
4674
4675 q = cnt / num;
4676 *quote = q ? q : 1;
4677 BT_DBG("chan %p quote %d", chan, *quote);
4678 return chan;
4679}
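
/* hci_chan_sent() is the per-channel, priority-aware counterpart of
 * hci_low_sent(): only channels whose head skb carries the highest
 * priority currently queued for this link type compete, and among those
 * the channel of the least-busy connection wins. The quote is derived
 * from the free controller buffers in the same way as in hci_low_sent().
 */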
4680
4681static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4682{
4683 struct hci_conn_hash *h = &hdev->conn_hash;
4684 struct hci_conn *conn;
4685 int num = 0;
4686
4687 BT_DBG("%s", hdev->name);
4688
4689 rcu_read_lock();
4690
4691 list_for_each_entry_rcu(conn, &h->list, list) {
4692 struct hci_chan *chan;
4693
4694 if (conn->type != type)
4695 continue;
4696
4697 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4698 continue;
4699
4700 num++;
4701
4702 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4703 struct sk_buff *skb;
4704
4705 if (chan->sent) {
4706 chan->sent = 0;
4707 continue;
4708 }
4709
4710 if (skb_queue_empty(&chan->data_q))
4711 continue;
4712
4713 skb = skb_peek(&chan->data_q);
4714 if (skb->priority >= HCI_PRIO_MAX - 1)
4715 continue;
4716
4717 skb->priority = HCI_PRIO_MAX - 1;
4718
4719 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4720 skb->priority);
4721 }
4722
4723 if (hci_conn_num(hdev, type) == num)
4724 break;
4725 }
4726
4727	rcu_read_unlock();
4729}
4730
4731static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4732{
4733 /* Calculate count of blocks used by this packet */
4734 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4735}
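
/* Illustrative example (numbers made up): with hdev->block_len == 339
 * and a 683-byte skb (4-byte ACL header plus 679 bytes of payload),
 * __get_blocks() returns DIV_ROUND_UP(679, 339) == 3 data blocks.
 */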
4736
4737static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4738{
4739 if (!test_bit(HCI_RAW, &hdev->flags)) {
4740 /* ACL tx timeout must be longer than maximum
4741 * link supervision timeout (40.9 seconds) */
4742 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4743 HCI_ACL_TX_TIMEOUT))
4744 hci_link_tx_to(hdev, ACL_LINK);
4745 }
4746}
4747
4748static void hci_sched_acl_pkt(struct hci_dev *hdev)
4749{
4750 unsigned int cnt = hdev->acl_cnt;
4751 struct hci_chan *chan;
4752 struct sk_buff *skb;
4753 int quote;
4754
4755 __check_timeout(hdev, cnt);
4756
4757 while (hdev->acl_cnt &&
4758	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4759 u32 priority = (skb_peek(&chan->data_q))->priority;
4760 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4761 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4762 skb->len, skb->priority);
4763
4764 /* Stop if priority has changed */
4765 if (skb->priority < priority)
4766 break;
4767
4768 skb = skb_dequeue(&chan->data_q);
4769
4770 hci_conn_enter_active_mode(chan->conn,
4771 bt_cb(skb)->force_active);
4772
4773 hci_send_frame(hdev, skb);
4774 hdev->acl_last_tx = jiffies;
4775
4776 hdev->acl_cnt--;
4777 chan->sent++;
4778 chan->conn->sent++;
4779 }
4780 }
4781
4782 if (cnt != hdev->acl_cnt)
4783 hci_prio_recalculate(hdev, ACL_LINK);
4784}
4785
4786static void hci_sched_acl_blk(struct hci_dev *hdev)
4787{
4788 unsigned int cnt = hdev->block_cnt;
4789 struct hci_chan *chan;
4790 struct sk_buff *skb;
4791 int quote;
4792 u8 type;
4793
4794 __check_timeout(hdev, cnt);
4795
4796 BT_DBG("%s", hdev->name);
4797
4798 if (hdev->dev_type == HCI_AMP)
4799 type = AMP_LINK;
4800 else
4801 type = ACL_LINK;
4802
4803 while (hdev->block_cnt > 0 &&
4804	       (chan = hci_chan_sent(hdev, type, &quote))) {
4805 u32 priority = (skb_peek(&chan->data_q))->priority;
4806 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4807 int blocks;
4808
4809 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4810 skb->len, skb->priority);
4811
4812 /* Stop if priority has changed */
4813 if (skb->priority < priority)
4814 break;
4815
4816 skb = skb_dequeue(&chan->data_q);
4817
4818 blocks = __get_blocks(hdev, skb);
4819 if (blocks > hdev->block_cnt)
4820 return;
4821
4822 hci_conn_enter_active_mode(chan->conn,
4823 bt_cb(skb)->force_active);
4824
4825 hci_send_frame(hdev, skb);
4826 hdev->acl_last_tx = jiffies;
4827
4828 hdev->block_cnt -= blocks;
4829 quote -= blocks;
4830
4831 chan->sent += blocks;
4832 chan->conn->sent += blocks;
4833 }
4834 }
4835
4836 if (cnt != hdev->block_cnt)
4837 hci_prio_recalculate(hdev, type);
4838}
4839
4840static void hci_sched_acl(struct hci_dev *hdev)
4841{
4842 BT_DBG("%s", hdev->name);
4843
4844 /* No ACL link over BR/EDR controller */
4845 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4846 return;
4847
4848 /* No AMP link over AMP controller */
4849 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4850 return;
4851
4852 switch (hdev->flow_ctl_mode) {
4853 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4854 hci_sched_acl_pkt(hdev);
4855 break;
4856
4857 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4858 hci_sched_acl_blk(hdev);
4859 break;
4860 }
4861}
4862
4863/* Schedule SCO */
4864static void hci_sched_sco(struct hci_dev *hdev)
4865{
4866 struct hci_conn *conn;
4867 struct sk_buff *skb;
4868 int quote;
4869
4870 BT_DBG("%s", hdev->name);
4871
4872 if (!hci_conn_num(hdev, SCO_LINK))
4873 return;
4874
4875	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4876 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4877 BT_DBG("skb %p len %d", skb, skb->len);
4878 hci_send_frame(hdev, skb);
4879
4880 conn->sent++;
4881 if (conn->sent == ~0)
4882 conn->sent = 0;
4883 }
4884 }
4885}
4886
4887static void hci_sched_esco(struct hci_dev *hdev)
4888{
4889 struct hci_conn *conn;
4890 struct sk_buff *skb;
4891 int quote;
4892
4893 BT_DBG("%s", hdev->name);
4894
4895 if (!hci_conn_num(hdev, ESCO_LINK))
4896 return;
4897
4898 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4899						     &quote))) {
4900 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4901 BT_DBG("skb %p len %d", skb, skb->len);
4902 hci_send_frame(hdev, skb);
4903
4904 conn->sent++;
4905 if (conn->sent == ~0)
4906 conn->sent = 0;
4907 }
4908 }
4909}
4910
4911static void hci_sched_le(struct hci_dev *hdev)
4912{
4913 struct hci_chan *chan;
4914 struct sk_buff *skb;
4915 int quote, cnt, tmp;
4916
4917 BT_DBG("%s", hdev->name);
4918
4919 if (!hci_conn_num(hdev, LE_LINK))
4920 return;
4921
4922 if (!test_bit(HCI_RAW, &hdev->flags)) {
4923 /* LE tx timeout must be longer than maximum
4924 * link supervision timeout (40.9 seconds) */
4925 if (!hdev->le_cnt && hdev->le_pkts &&
4926 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4927 hci_link_tx_to(hdev, LE_LINK);
4928 }
4929
4930 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4931 tmp = cnt;
4932	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4933 u32 priority = (skb_peek(&chan->data_q))->priority;
4934 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4935 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4936 skb->len, skb->priority);
4937
4938 /* Stop if priority has changed */
4939 if (skb->priority < priority)
4940 break;
4941
4942 skb = skb_dequeue(&chan->data_q);
4943
4944 hci_send_frame(hdev, skb);
4945 hdev->le_last_tx = jiffies;
4946
4947 cnt--;
4948 chan->sent++;
4949 chan->conn->sent++;
4950 }
4951 }
4952
4953 if (hdev->le_pkts)
4954 hdev->le_cnt = cnt;
4955 else
4956 hdev->acl_cnt = cnt;
4957
4958 if (cnt != tmp)
4959 hci_prio_recalculate(hdev, LE_LINK);
4960}
4961
4962static void hci_tx_work(struct work_struct *work)
4963{
4964 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4965 struct sk_buff *skb;
4966
4967 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4968 hdev->sco_cnt, hdev->le_cnt);
4969
4970 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4971 /* Schedule queues and send stuff to HCI driver */
4972 hci_sched_acl(hdev);
4973 hci_sched_sco(hdev);
4974 hci_sched_esco(hdev);
4975 hci_sched_le(hdev);
4976 }
4977
4978 /* Send next queued raw (unknown type) packet */
4979 while ((skb = skb_dequeue(&hdev->raw_q)))
4980 hci_send_frame(hdev, skb);
4981}
4982
4983/* ----- HCI RX task (incoming data processing) ----- */
4984
4985/* ACL data packet */
4986static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4987{
4988 struct hci_acl_hdr *hdr = (void *) skb->data;
4989 struct hci_conn *conn;
4990 __u16 handle, flags;
4991
4992 skb_pull(skb, HCI_ACL_HDR_SIZE);
4993
4994 handle = __le16_to_cpu(hdr->handle);
4995 flags = hci_flags(handle);
4996 handle = hci_handle(handle);
4997
4998 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4999 handle, flags);
5000
5001 hdev->stat.acl_rx++;
5002
5003 hci_dev_lock(hdev);
5004 conn = hci_conn_hash_lookup_handle(hdev, handle);
5005 hci_dev_unlock(hdev);
5006
5007 if (conn) {
5008 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5009
5010 /* Send to upper protocol */
5011 l2cap_recv_acldata(conn, skb, flags);
5012 return;
5013 } else {
5014 BT_ERR("%s ACL packet for unknown connection handle %d",
5015 hdev->name, handle);
5016 }
5017
5018 kfree_skb(skb);
5019}
5020
5021/* SCO data packet */
5022static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5023{
5024 struct hci_sco_hdr *hdr = (void *) skb->data;
5025 struct hci_conn *conn;
5026 __u16 handle;
5027
5028 skb_pull(skb, HCI_SCO_HDR_SIZE);
5029
5030 handle = __le16_to_cpu(hdr->handle);
5031
5032 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5033
5034 hdev->stat.sco_rx++;
5035
5036 hci_dev_lock(hdev);
5037 conn = hci_conn_hash_lookup_handle(hdev, handle);
5038 hci_dev_unlock(hdev);
5039
5040 if (conn) {
5041 /* Send to upper protocol */
5042 sco_recv_scodata(conn, skb);
5043 return;
5044 } else {
5045 BT_ERR("%s SCO packet for unknown connection handle %d",
5046 hdev->name, handle);
5047 }
5048
5049 kfree_skb(skb);
5050}
5051
5052static bool hci_req_is_complete(struct hci_dev *hdev)
5053{
5054 struct sk_buff *skb;
5055
5056 skb = skb_peek(&hdev->cmd_q);
5057 if (!skb)
5058 return true;
5059
5060 return bt_cb(skb)->req.start;
5061}
5062
5063static void hci_resend_last(struct hci_dev *hdev)
5064{
5065 struct hci_command_hdr *sent;
5066 struct sk_buff *skb;
5067 u16 opcode;
5068
5069 if (!hdev->sent_cmd)
5070 return;
5071
5072 sent = (void *) hdev->sent_cmd->data;
5073 opcode = __le16_to_cpu(sent->opcode);
5074 if (opcode == HCI_OP_RESET)
5075 return;
5076
5077 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5078 if (!skb)
5079 return;
5080
5081 skb_queue_head(&hdev->cmd_q, skb);
5082 queue_work(hdev->workqueue, &hdev->cmd_work);
5083}
5084
5085void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5086{
5087 hci_req_complete_t req_complete = NULL;
5088 struct sk_buff *skb;
5089 unsigned long flags;
5090
5091 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5092
5093 /* If the completed command doesn't match the last one that was
5094 * sent we need to do special handling of it.
5095 */
5096 if (!hci_sent_cmd_data(hdev, opcode)) {
5097 /* Some CSR based controllers generate a spontaneous
5098 * reset complete event during init and any pending
5099 * command will never be completed. In such a case we
5100 * need to resend whatever was the last sent
5101 * command.
5102 */
5103 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5104 hci_resend_last(hdev);
5105
5106 return;
5107 }
5108
5109	/* If the command succeeded and there are still more commands in
5110	 * this request, the request is not yet complete.
5111 */
5112 if (!status && !hci_req_is_complete(hdev))
5113 return;
5114
5115	/* If this was the last command in a request, the complete
5116 * callback would be found in hdev->sent_cmd instead of the
5117 * command queue (hdev->cmd_q).
5118 */
5119 if (hdev->sent_cmd) {
5120 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5121
5122 if (req_complete) {
5123 /* We must set the complete callback to NULL to
5124 * avoid calling the callback more than once if
5125 * this function gets called again.
5126 */
5127 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5128
5129 goto call_complete;
5130 }
5131 }
5132
5133 /* Remove all pending commands belonging to this request */
5134 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5135 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5136 if (bt_cb(skb)->req.start) {
5137 __skb_queue_head(&hdev->cmd_q, skb);
5138 break;
5139 }
5140
5141 req_complete = bt_cb(skb)->req.complete;
5142 kfree_skb(skb);
5143 }
5144 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5145
5146call_complete:
5147 if (req_complete)
5148 req_complete(hdev, status);
5149}
5150
5151static void hci_rx_work(struct work_struct *work)
5152{
5153 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5154 struct sk_buff *skb;
5155
5156 BT_DBG("%s", hdev->name);
5157
5158 while ((skb = skb_dequeue(&hdev->rx_q))) {
5159 /* Send copy to monitor */
5160 hci_send_to_monitor(hdev, skb);
5161
5162 if (atomic_read(&hdev->promisc)) {
5163 /* Send copy to the sockets */
5164 hci_send_to_sock(hdev, skb);
5165 }
5166
5167 if (test_bit(HCI_RAW, &hdev->flags) ||
5168 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5169 kfree_skb(skb);
5170 continue;
5171 }
5172
5173 if (test_bit(HCI_INIT, &hdev->flags)) {
5174			/* Don't process data packets in this state. */
5175 switch (bt_cb(skb)->pkt_type) {
5176 case HCI_ACLDATA_PKT:
5177 case HCI_SCODATA_PKT:
5178 kfree_skb(skb);
5179 continue;
5180 }
5181 }
5182
5183 /* Process frame */
5184 switch (bt_cb(skb)->pkt_type) {
5185 case HCI_EVENT_PKT:
5186 BT_DBG("%s Event packet", hdev->name);
5187 hci_event_packet(hdev, skb);
5188 break;
5189
5190 case HCI_ACLDATA_PKT:
5191 BT_DBG("%s ACL data packet", hdev->name);
5192 hci_acldata_packet(hdev, skb);
5193 break;
5194
5195 case HCI_SCODATA_PKT:
5196 BT_DBG("%s SCO data packet", hdev->name);
5197 hci_scodata_packet(hdev, skb);
5198 break;
5199
5200 default:
5201 kfree_skb(skb);
5202 break;
5203 }
5204 }
5205}
5206
5207static void hci_cmd_work(struct work_struct *work)
5208{
5209 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5210 struct sk_buff *skb;
5211
5212 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5213 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5214
5215 /* Send queued commands */
5216 if (atomic_read(&hdev->cmd_cnt)) {
5217 skb = skb_dequeue(&hdev->cmd_q);
5218 if (!skb)
5219 return;
5220
5221 kfree_skb(hdev->sent_cmd);
5222
5223 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5224 if (hdev->sent_cmd) {
5225 atomic_dec(&hdev->cmd_cnt);
5226 hci_send_frame(hdev, skb);
5227 if (test_bit(HCI_RESET, &hdev->flags))
5228 del_timer(&hdev->cmd_timer);
5229 else
5230 mod_timer(&hdev->cmd_timer,
5231 jiffies + HCI_CMD_TIMEOUT);
5232 } else {
5233 skb_queue_head(&hdev->cmd_q, skb);
5234 queue_work(hdev->workqueue, &hdev->cmd_work);
5235 }
5236 }
5237}
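
/* hdev->cmd_cnt tracks how many HCI commands the controller is willing
 * to accept: it is decremented here when a command is handed to
 * hci_send_frame() and replenished from the event path once the
 * controller reports fresh command credits. cmd_timer, armed above,
 * catches controllers that never answer a command at all.
 */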
5238
5239void hci_req_add_le_scan_disable(struct hci_request *req)
5240{
5241 struct hci_cp_le_set_scan_enable cp;
5242
5243 memset(&cp, 0, sizeof(cp));
5244 cp.enable = LE_SCAN_DISABLE;
5245 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5246}
5247
5248void hci_req_add_le_passive_scan(struct hci_request *req)
5249{
5250 struct hci_cp_le_set_scan_param param_cp;
5251 struct hci_cp_le_set_scan_enable enable_cp;
5252 struct hci_dev *hdev = req->hdev;
5253 u8 own_addr_type;
5254
5255 /* Set require_privacy to true to avoid identification from
5256 * unknown peer devices. Since this is passive scanning, no
5257 * SCAN_REQ using the local identity should be sent. Mandating
5258 * privacy is just an extra precaution.
5259 */
5260 if (hci_update_random_address(req, true, &own_addr_type))
5261 return;
5262
5263	memset(&param_cp, 0, sizeof(param_cp));
5264 param_cp.type = LE_SCAN_PASSIVE;
5265 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5266 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5267 param_cp.own_address_type = own_addr_type;
5268 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5269		    &param_cp);
5270
5271 memset(&enable_cp, 0, sizeof(enable_cp));
5272 enable_cp.enable = LE_SCAN_ENABLE;
5273 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5274 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5275 &enable_cp);
5276}
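
/* The scan interval and window are passed through from hdev unchanged;
 * per the Core Specification both values are expressed in units of
 * 0.625 ms and the window must not be larger than the interval.
 */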
5277
5278static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5279{
5280 if (status)
5281 BT_DBG("HCI request failed to update background scanning: "
5282 "status 0x%2.2x", status);
5283}
5284
5285/* This function controls the background scanning based on hdev->pend_le_conns
5286 * list. If there are pending LE connections, we start the background scanning,
5287 * otherwise we stop it.
5288 *
5289 * This function requires the caller holds hdev->lock.
5290 */
5291void hci_update_background_scan(struct hci_dev *hdev)
5292{
5293 struct hci_request req;
5294 struct hci_conn *conn;
5295 int err;
5296
5297 hci_req_init(&req, hdev);
5298
5299 if (list_empty(&hdev->pend_le_conns)) {
5300		/* If there are no pending LE connections, we should stop
5301 * the background scanning.
5302 */
5303
5304 /* If controller is not scanning we are done. */
5305 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5306 return;
5307
5308 hci_req_add_le_scan_disable(&req);
5309
5310 BT_DBG("%s stopping background scanning", hdev->name);
5311 } else {
5312 /* If there is at least one pending LE connection, we should
5313 * keep the background scan running.
5314 */
5315
5316 /* If controller is connecting, we should not start scanning
5317 * since some controllers are not able to scan and connect at
5318 * the same time.
5319 */
5320 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5321 if (conn)
5322 return;
5323
5324 /* If controller is currently scanning, we stop it to ensure we
5325 * don't miss any advertising (due to duplicates filter).
5326 */
5327 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5328 hci_req_add_le_scan_disable(&req);
5329
5330 hci_req_add_le_passive_scan(&req);
5331
5332 BT_DBG("%s starting background scanning", hdev->name);
5333 }
5334
5335 err = hci_req_run(&req, update_background_scan_complete);
5336 if (err)
5337 BT_ERR("Failed to run HCI request: err %d", err);
5338}