/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
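
/* Sketch of how this attribute is exercised from userspace, assuming
 * debugfs is mounted at /sys/kernel/debug (the hci0 name is just an
 * example):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' sends HCI_OP_ENABLE_DUT_MODE to the controller; writing
 * 'N' resets it via HCI_OP_RESET, as implemented in dut_mode_write()
 * above.
 */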

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms = 20 s) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
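	/* Byte b, bit n of events[] corresponds to mask bit (b * 8 + n)
	 * of the Set Event Mask command. For example, events[4] |= 0x01
	 * below sets mask bit 32, used here for the Flow Specification
	 * Complete event.
	 */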

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers report
		 * max_page as 0 as long as SSP has not been enabled. To
		 * get proper debugging output, force max_page to at
		 * least 1.
		 */
450
451 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
452 u8 mode = 0x01;
453
454 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
455 sizeof(mode), &mode);
456 } else {
457 struct hci_cp_write_eir cp;
458
459 memset(hdev->eir, 0, sizeof(hdev->eir));
460 memset(&cp, 0, sizeof(cp));
461
462 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
463 }
464 }
465
466 if (lmp_inq_rssi_capable(hdev) ||
467 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
468 u8 mode;
469
470 /* If Extended Inquiry Result events are supported, then
471 * they are clearly preferred over Inquiry Result with RSSI
472 * events.
473 */
474 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
475
476 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
477 }
478
479 if (lmp_inq_tx_pwr_capable(hdev))
480 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
481
482 if (lmp_ext_feat_capable(hdev)) {
483 struct hci_cp_read_local_ext_features cp;
484
485 cp.page = 0x01;
486 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
487 sizeof(cp), &cp);
488 }
489
490 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
491 u8 enable = 1;
492 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
493 &enable);
494 }
495
496 return 0;
497}
498
499static void hci_setup_link_policy(struct hci_request *req)
500{
501 struct hci_dev *hdev = req->hdev;
502 struct hci_cp_write_def_link_policy cp;
503 u16 link_policy = 0;
504
505 if (lmp_rswitch_capable(hdev))
506 link_policy |= HCI_LP_RSWITCH;
507 if (lmp_hold_capable(hdev))
508 link_policy |= HCI_LP_HOLD;
509 if (lmp_sniff_capable(hdev))
510 link_policy |= HCI_LP_SNIFF;
511 if (lmp_park_capable(hdev))
512 link_policy |= HCI_LP_PARK;
513
514 cp.policy = cpu_to_le16(link_policy);
515 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
516}
517
518static void hci_set_le_support(struct hci_request *req)
519{
520 struct hci_dev *hdev = req->hdev;
521 struct hci_cp_write_le_host_supported cp;
522
523 /* LE-only devices do not support explicit enablement */
524 if (!lmp_bredr_capable(hdev))
525 return;
526
527 memset(&cp, 0, sizeof(cp));
528
529 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
530 cp.le = 0x01;
531 cp.simul = 0x00;
532 }
533
534 if (cp.le != lmp_host_le_capable(hdev))
535 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
536 &cp);
537}
538
539static void hci_set_event_mask_page_2(struct hci_request *req)
540{
541 struct hci_dev *hdev = req->hdev;
542 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
543 bool changed = false;
544
	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10;	/* LE Extended Advertising
						 * Report
						 */

		/* If the controller supports the LE Extended Create Connection
		 * command, enable the corresponding event.
		 */
		if (use_ext_conn(hdev))
			events[1] |= 0x02;	/* LE Enhanced Connection
						 * Complete
						 */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02;	/* LE Advertising Set
						 * Terminated
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* The HCI spec forbids mixing legacy and extended
			 * advertising commands, and READ_ADV_TX_POWER is
			 * in the legacy group. So do not call it if
			 * extended advertising is supported, otherwise the
			 * controller will return COMMAND_DISALLOWED for
			 * extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They clearly indicate its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it
	 * is marked as supported. If not supported, assume that the
	 * controller does not have actual support for stored link keys,
	 * which makes this command redundant anyway.
	 *
	 * Some controllers indicate support for deleting stored link
	 * keys, but they don't. The quirk lets a driver just disable
	 * this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

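/* Controller bring-up runs in up to four synchronous stages, each a
 * batch of HCI commands built with hci_req_add() and flushed by
 * __hci_req_sync():
 *
 *   init1 - optional reset plus basic identity reads (features,
 *           version, BD address)
 *   init2 - BR/EDR and LE transport setup, local supported commands
 *   init3 - event masks and LE controller settings
 *   init4 - stored-link-key cleanup, secure connections, data length
 *           and default PHY parameters
 *
 * AMP controllers stop after init2; see the dev_type check in
 * __hci_init() below.
 */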
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
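/* A minimal usage sketch (index 0 chosen only for illustration):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 *
 * Every successful hci_dev_get() must be balanced with hci_dev_put(),
 * as the ioctl helpers later in this file do.
 */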
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

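/* Re-insert @ie into the resolve list, keeping the list sorted by
 * ascending abs(rssi) so that devices with the strongest signal get
 * name-resolved first. Entries whose name resolution is already
 * pending (NAME_PENDING) are skipped in the comparison.
 */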
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

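/* The flags returned here are folded by the caller into the mgmt
 * Device Found event: MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to
 * confirm whether the remote name is still needed, while
 * MGMT_DEV_FOUND_LEGACY_PAIRING marks devices without SSP support.
 */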
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

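	/* ir.length is the inquiry duration in units of 1.28 seconds per
	 * the HCI specification; budgeting 2000 ms of wall-clock time per
	 * unit leaves headroom for the controller to finish.
	 */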
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for an HCI device from
 *                                     a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}
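
/* A hypothetical device tree fragment that the helper above would pick
 * up (node name and address bytes are illustrative only; the binding
 * stores the address least-significant byte first, so this would be
 * BD address 00:11:22:33:44:55):
 *
 *	&bluetooth {
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 */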

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr)
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

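	/* For the two MTU ioctls below, dev_opt is treated as two
	 * consecutive __u16 values laid out by userspace: the first
	 * carries the packet count, the second the MTU.
	 */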
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

2049 /* When the auto-off is configured it means the transport
2050 * is running, but in that case still indicate that the
2051 * device is actually down.
2052 */
2053 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2054 flags &= ~BIT(HCI_UP);
2055
2056 (dr + n)->dev_id = hdev->id;
2057 (dr + n)->dev_opt = flags;
2058
2059 if (++n >= dev_num)
2060 break;
2061 }
2062 read_unlock(&hci_dev_list_lock);
2063
2064 dl->dev_num = n;
2065 size = sizeof(*dl) + n * sizeof(*dr);
2066
2067 err = copy_to_user(arg, dl, size);
2068 kfree(dl);
2069
2070 return err ? -EFAULT : 0;
2071}
2072
2073int hci_get_dev_info(void __user *arg)
2074{
2075 struct hci_dev *hdev;
2076 struct hci_dev_info di;
2077 unsigned long flags;
2078 int err = 0;
2079
2080 if (copy_from_user(&di, arg, sizeof(di)))
2081 return -EFAULT;
2082
2083 hdev = hci_dev_get(di.dev_id);
2084 if (!hdev)
2085 return -ENODEV;
2086
2087 /* When the auto-off is configured it means the transport
2088 * is running, but in that case still indicate that the
2089 * device is actually down.
2090 */
2091 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2092 flags = hdev->flags & ~BIT(HCI_UP);
2093 else
2094 flags = hdev->flags;
2095
2096 strcpy(di.name, hdev->name);
2097 di.bdaddr = hdev->bdaddr;
2098 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2099 di.flags = flags;
2100 di.pkt_type = hdev->pkt_type;
2101 if (lmp_bredr_capable(hdev)) {
2102 di.acl_mtu = hdev->acl_mtu;
2103 di.acl_pkts = hdev->acl_pkts;
2104 di.sco_mtu = hdev->sco_mtu;
2105 di.sco_pkts = hdev->sco_pkts;
2106 } else {
2107 di.acl_mtu = hdev->le_mtu;
2108 di.acl_pkts = hdev->le_pkts;
2109 di.sco_mtu = 0;
2110 di.sco_pkts = 0;
2111 }
2112 di.link_policy = hdev->link_policy;
2113 di.link_mode = hdev->link_mode;
2114
2115 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2116 memcpy(&di.features, &hdev->features, sizeof(di.features));
2117
2118 if (copy_to_user(arg, &di, sizeof(di)))
2119 err = -EFAULT;
2120
2121 hci_dev_put(hdev);
2122
2123 return err;
2124}
2125
2126/* ---- Interface to HCI drivers ---- */
2127
2128static int hci_rfkill_set_block(void *data, bool blocked)
2129{
2130 struct hci_dev *hdev = data;
2131
2132 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2133
2134 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2135 return -EBUSY;
2136
2137 if (blocked) {
2138 hci_dev_set_flag(hdev, HCI_RFKILLED);
2139 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2140 !hci_dev_test_flag(hdev, HCI_CONFIG))
2141 hci_dev_do_close(hdev);
2142 } else {
2143 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2144 }
2145
2146 return 0;
2147}
2148
2149static const struct rfkill_ops hci_rfkill_ops = {
2150 .set_block = hci_rfkill_set_block,
2151};
2152
2153static void hci_power_on(struct work_struct *work)
2154{
2155 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2156 int err;
2157
2158 BT_DBG("%s", hdev->name);
2159
2160 if (test_bit(HCI_UP, &hdev->flags) &&
2161 hci_dev_test_flag(hdev, HCI_MGMT) &&
2162 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2163 cancel_delayed_work(&hdev->power_off);
2164 hci_req_sync_lock(hdev);
2165 err = __hci_req_hci_power_on(hdev);
2166 hci_req_sync_unlock(hdev);
2167 mgmt_power_on(hdev, err);
2168 return;
2169 }
2170
2171 err = hci_dev_do_open(hdev);
2172 if (err < 0) {
2173 hci_dev_lock(hdev);
2174 mgmt_set_powered_failed(hdev, err);
2175 hci_dev_unlock(hdev);
2176 return;
2177 }
2178
2179 /* During the HCI setup phase, a few error conditions are
2180 * ignored and they need to be checked now. If they are still
2181 * valid, it is important to turn the device back off.
2182 */
2183 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2184 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2185 (hdev->dev_type == HCI_PRIMARY &&
2186 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2187 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2188 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2189 hci_dev_do_close(hdev);
2190 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2191 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2192 HCI_AUTO_OFF_TIMEOUT);
2193 }
2194
2195 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2196 /* For unconfigured devices, set the HCI_RAW flag
2197 * so that userspace can easily identify them.
2198 */
2199 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2200 set_bit(HCI_RAW, &hdev->flags);
2201
2202 /* For fully configured devices, this will send
2203 * the Index Added event. For unconfigured devices,
2204 * it will send an Unconfigured Index Added event.
2205 *
2206 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2207 * and no event will be sent.
2208 */
2209 mgmt_index_added(hdev);
2210 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2211 /* When the controller is now configured, then it
2212 * is important to clear the HCI_RAW flag.
2213 */
2214 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2215 clear_bit(HCI_RAW, &hdev->flags);
2216
2217 /* Powering on the controller with HCI_CONFIG set only
2218 * happens with the transition from unconfigured to
2219 * configured. This will send the Index Added event.
2220 */
2221 mgmt_index_added(hdev);
2222 }
2223}
2224
2225static void hci_power_off(struct work_struct *work)
2226{
2227 struct hci_dev *hdev = container_of(work, struct hci_dev,
2228 power_off.work);
2229
2230 BT_DBG("%s", hdev->name);
2231
2232 hci_dev_do_close(hdev);
2233}
2234
2235static void hci_error_reset(struct work_struct *work)
2236{
2237 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2238
2239 BT_DBG("%s", hdev->name);
2240
2241 if (hdev->hw_error)
2242 hdev->hw_error(hdev, hdev->hw_error_code);
2243 else
2244 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2245
2246 if (hci_dev_do_close(hdev))
2247 return;
2248
2249 hci_dev_do_open(hdev);
2250}
2251
2252void hci_uuids_clear(struct hci_dev *hdev)
2253{
2254 struct bt_uuid *uuid, *tmp;
2255
2256 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2257 list_del(&uuid->list);
2258 kfree(uuid);
2259 }
2260}
2261
2262void hci_link_keys_clear(struct hci_dev *hdev)
2263{
2264 struct link_key *key;
2265
2266 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2267 list_del_rcu(&key->list);
2268 kfree_rcu(key, rcu);
2269 }
2270}
2271
2272void hci_smp_ltks_clear(struct hci_dev *hdev)
2273{
2274 struct smp_ltk *k;
2275
2276 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277 list_del_rcu(&k->list);
2278 kfree_rcu(k, rcu);
2279 }
2280}
2281
2282void hci_smp_irks_clear(struct hci_dev *hdev)
2283{
2284 struct smp_irk *k;
2285
2286 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2287 list_del_rcu(&k->list);
2288 kfree_rcu(k, rcu);
2289 }
2290}
2291
2292struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2293{
2294 struct link_key *k;
2295
2296 rcu_read_lock();
2297 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2298 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2299 rcu_read_unlock();
2300 return k;
2301 }
2302 }
2303 rcu_read_unlock();
2304
2305 return NULL;
2306}
2307
2308static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2309 u8 key_type, u8 old_key_type)
2310{
2311 /* Legacy key */
2312 if (key_type < 0x03)
2313 return true;
2314
2315 /* Debug keys are insecure so don't store them persistently */
2316 if (key_type == HCI_LK_DEBUG_COMBINATION)
2317 return false;
2318
2319 /* Changed combination key and there's no previous one */
2320 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2321 return false;
2322
2323 /* Security mode 3 case */
2324 if (!conn)
2325 return true;
2326
2327 /* BR/EDR key derived using SC from an LE link */
2328 if (conn->type == LE_LINK)
2329 return true;
2330
2331 /* Neither local nor remote side had no-bonding as a requirement */
2332 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2333 return true;
2334
2335 /* Local side had dedicated bonding as requirement */
2336 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2337 return true;
2338
2339 /* Remote side had dedicated bonding as requirement */
2340 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2341 return true;
2342
2343 /* If none of the above criteria match, then don't store the key
2344 * persistently */
2345 return false;
2346}
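
/* Worked example of the rules above: a legacy Combination Key
 * (type 0x00) is always stored; an Unauthenticated Combination Key
 * (0x04) is stored after dedicated bonding (auth_type 0x02/0x03),
 * but not when both sides paired with a no-bonding requirement
 * (0x00/0x01).
 */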
2347
2348static u8 ltk_role(u8 type)
2349{
2350 if (type == SMP_LTK)
2351 return HCI_ROLE_MASTER;
2352
2353 return HCI_ROLE_SLAVE;
2354}
2355
2356struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2357 u8 addr_type, u8 role)
2358{
2359 struct smp_ltk *k;
2360
2361 rcu_read_lock();
2362 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2363 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2364 continue;
2365
2366 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2367 rcu_read_unlock();
2368 return k;
2369 }
2370 }
2371 rcu_read_unlock();
2372
2373 return NULL;
2374}
2375
2376struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2377{
2378 struct smp_irk *irk;
2379
2380 rcu_read_lock();
2381 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2382 if (!bacmp(&irk->rpa, rpa)) {
2383 rcu_read_unlock();
2384 return irk;
2385 }
2386 }
2387
2388 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2389 if (smp_irk_matches(hdev, irk->val, rpa)) {
2390 bacpy(&irk->rpa, rpa);
2391 rcu_read_unlock();
2392 return irk;
2393 }
2394 }
2395 rcu_read_unlock();
2396
2397 return NULL;
2398}
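
/* The two passes above form a cheap-then-expensive lookup: the first
 * loop only compares the cached RPA value, while the second runs the
 * crypto-based smp_irk_matches() and, on a match, refreshes the
 * cached RPA so the next lookup takes the fast path.
 */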
2399
2400struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 u8 addr_type)
2402{
2403 struct smp_irk *irk;
2404
2405 /* Identity Address must be public or static random */
2406 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2407 return NULL;
2408
2409 rcu_read_lock();
2410 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2411 if (addr_type == irk->addr_type &&
2412 bacmp(bdaddr, &irk->bdaddr) == 0) {
2413 rcu_read_unlock();
2414 return irk;
2415 }
2416 }
2417 rcu_read_unlock();
2418
2419 return NULL;
2420}
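
/* Note on the check above: bdaddr_t is stored little-endian, so b[5]
 * holds the most significant byte. A static random address must have
 * its two top bits set, hence the (b[5] & 0xc0) == 0xc0 requirement
 * for ADDR_LE_DEV_RANDOM identity addresses.
 */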
2421
2422struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2423 bdaddr_t *bdaddr, u8 *val, u8 type,
2424 u8 pin_len, bool *persistent)
2425{
2426 struct link_key *key, *old_key;
2427 u8 old_key_type;
2428
2429 old_key = hci_find_link_key(hdev, bdaddr);
2430 if (old_key) {
2431 old_key_type = old_key->type;
2432 key = old_key;
2433 } else {
2434 old_key_type = conn ? conn->key_type : 0xff;
2435 key = kzalloc(sizeof(*key), GFP_KERNEL);
2436 if (!key)
2437 return NULL;
2438 list_add_rcu(&key->list, &hdev->link_keys);
2439 }
2440
2441 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2442
2443 /* Some buggy controller combinations generate a changed
2444 * combination key for legacy pairing even when there's no
2445 * previous key */
2446 if (type == HCI_LK_CHANGED_COMBINATION &&
2447 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2448 type = HCI_LK_COMBINATION;
2449 if (conn)
2450 conn->key_type = type;
2451 }
2452
2453 bacpy(&key->bdaddr, bdaddr);
2454 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2455 key->pin_len = pin_len;
2456
2457 if (type == HCI_LK_CHANGED_COMBINATION)
2458 key->type = old_key_type;
2459 else
2460 key->type = type;
2461
2462 if (persistent)
2463 *persistent = hci_persistent_key(hdev, conn, type,
2464 old_key_type);
2465
2466 return key;
2467}
2468
2469struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2470 u8 addr_type, u8 type, u8 authenticated,
2471 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2472{
2473 struct smp_ltk *key, *old_key;
2474 u8 role = ltk_role(type);
2475
2476 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2477 if (old_key)
2478 key = old_key;
2479 else {
2480 key = kzalloc(sizeof(*key), GFP_KERNEL);
2481 if (!key)
2482 return NULL;
2483 list_add_rcu(&key->list, &hdev->long_term_keys);
2484 }
2485
2486 bacpy(&key->bdaddr, bdaddr);
2487 key->bdaddr_type = addr_type;
2488 memcpy(key->val, tk, sizeof(key->val));
2489 key->authenticated = authenticated;
2490 key->ediv = ediv;
2491 key->rand = rand;
2492 key->enc_size = enc_size;
2493 key->type = type;
2494
2495 return key;
2496}
2497
2498struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2499 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2500{
2501 struct smp_irk *irk;
2502
2503 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2504 if (!irk) {
2505 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2506 if (!irk)
2507 return NULL;
2508
2509 bacpy(&irk->bdaddr, bdaddr);
2510 irk->addr_type = addr_type;
2511
2512 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2513 }
2514
2515 memcpy(irk->val, val, 16);
2516 bacpy(&irk->rpa, rpa);
2517
2518 return irk;
2519}
2520
2521int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2522{
2523 struct link_key *key;
2524
2525 key = hci_find_link_key(hdev, bdaddr);
2526 if (!key)
2527 return -ENOENT;
2528
2529 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2530
2531 list_del_rcu(&key->list);
2532 kfree_rcu(key, rcu);
2533
2534 return 0;
2535}
2536
2537int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2538{
2539 struct smp_ltk *k;
2540 int removed = 0;
2541
2542 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2543 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2544 continue;
2545
2546 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2547
2548 list_del_rcu(&k->list);
2549 kfree_rcu(k, rcu);
2550 removed++;
2551 }
2552
2553 return removed ? 0 : -ENOENT;
2554}
2555
2556void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2557{
2558 struct smp_irk *k;
2559
2560 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2561 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2562 continue;
2563
2564 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2565
2566 list_del_rcu(&k->list);
2567 kfree_rcu(k, rcu);
2568 }
2569}
2570
2571bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2572{
2573 struct smp_ltk *k;
2574 struct smp_irk *irk;
2575 u8 addr_type;
2576
2577 if (type == BDADDR_BREDR) {
2578 if (hci_find_link_key(hdev, bdaddr))
2579 return true;
2580 return false;
2581 }
2582
2583 /* Convert to HCI addr type which struct smp_ltk uses */
2584 if (type == BDADDR_LE_PUBLIC)
2585 addr_type = ADDR_LE_DEV_PUBLIC;
2586 else
2587 addr_type = ADDR_LE_DEV_RANDOM;
2588
2589 irk = hci_get_irk(hdev, bdaddr, addr_type);
2590 if (irk) {
2591 bdaddr = &irk->bdaddr;
2592 addr_type = irk->addr_type;
2593 }
2594
2595 rcu_read_lock();
2596 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2597 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2598 rcu_read_unlock();
2599 return true;
2600 }
2601 }
2602 rcu_read_unlock();
2603
2604 return false;
2605}
2606
2607/* HCI command timer function */
2608static void hci_cmd_timeout(struct work_struct *work)
2609{
2610 struct hci_dev *hdev = container_of(work, struct hci_dev,
2611 cmd_timer.work);
2612
2613 if (hdev->sent_cmd) {
2614 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2615 u16 opcode = __le16_to_cpu(sent->opcode);
2616
2617 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2618 } else {
2619 bt_dev_err(hdev, "command tx timeout");
2620 }
2621
2622 if (hdev->cmd_timeout)
2623 hdev->cmd_timeout(hdev);
2624
2625 atomic_set(&hdev->cmd_cnt, 1);
2626 queue_work(hdev->workqueue, &hdev->cmd_work);
2627}
2628
2629struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2630 bdaddr_t *bdaddr, u8 bdaddr_type)
2631{
2632 struct oob_data *data;
2633
2634 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2635 if (bacmp(bdaddr, &data->bdaddr) != 0)
2636 continue;
2637 if (data->bdaddr_type != bdaddr_type)
2638 continue;
2639 return data;
2640 }
2641
2642 return NULL;
2643}
2644
2645int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2646 u8 bdaddr_type)
2647{
2648 struct oob_data *data;
2649
2650 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2651 if (!data)
2652 return -ENOENT;
2653
2654 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2655
2656 list_del(&data->list);
2657 kfree(data);
2658
2659 return 0;
2660}
2661
2662void hci_remote_oob_data_clear(struct hci_dev *hdev)
2663{
2664 struct oob_data *data, *n;
2665
2666 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2667 list_del(&data->list);
2668 kfree(data);
2669 }
2670}
2671
2672int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2673 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2674 u8 *hash256, u8 *rand256)
2675{
2676 struct oob_data *data;
2677
2678 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2679 if (!data) {
2680 data = kmalloc(sizeof(*data), GFP_KERNEL);
2681 if (!data)
2682 return -ENOMEM;
2683
2684 bacpy(&data->bdaddr, bdaddr);
2685 data->bdaddr_type = bdaddr_type;
2686 list_add(&data->list, &hdev->remote_oob_data);
2687 }
2688
2689 if (hash192 && rand192) {
2690 memcpy(data->hash192, hash192, sizeof(data->hash192));
2691 memcpy(data->rand192, rand192, sizeof(data->rand192));
2692 if (hash256 && rand256)
2693 data->present = 0x03;
2694 } else {
2695 memset(data->hash192, 0, sizeof(data->hash192));
2696 memset(data->rand192, 0, sizeof(data->rand192));
2697 if (hash256 && rand256)
2698 data->present = 0x02;
2699 else
2700 data->present = 0x00;
2701 }
2702
2703 if (hash256 && rand256) {
2704 memcpy(data->hash256, hash256, sizeof(data->hash256));
2705 memcpy(data->rand256, rand256, sizeof(data->rand256));
2706 } else {
2707 memset(data->hash256, 0, sizeof(data->hash256));
2708 memset(data->rand256, 0, sizeof(data->rand256));
2709 if (hash192 && rand192)
2710 data->present = 0x01;
2711 }
2712
2713 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2714
2715 return 0;
2716}
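
/* Summary of the data->present encoding produced above:
 *
 *	0x00 - no OOB data available
 *	0x01 - only P-192 data (hash192/rand192) available
 *	0x02 - only P-256 data (hash256/rand256) available
 *	0x03 - both P-192 and P-256 data available
 */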
2717
2718/* This function requires the caller holds hdev->lock */
2719struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2720{
2721 struct adv_info *adv_instance;
2722
2723 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2724 if (adv_instance->instance == instance)
2725 return adv_instance;
2726 }
2727
2728 return NULL;
2729}
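
/* Usage sketch (hypothetical caller): the lookup itself does no
 * locking, so hdev->lock must be held around the call and for as long
 * as the returned pointer is dereferenced:
 *
 *	hci_dev_lock(hdev);
 *	adv = hci_find_adv_instance(hdev, 0x01);
 *	if (adv)
 *		BT_DBG("flags 0x%08x", adv->flags);
 *	hci_dev_unlock(hdev);
 */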
2730
2731/* This function requires the caller holds hdev->lock */
2732struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2733{
2734 struct adv_info *cur_instance;
2735
2736 cur_instance = hci_find_adv_instance(hdev, instance);
2737 if (!cur_instance)
2738 return NULL;
2739
2740 if (cur_instance == list_last_entry(&hdev->adv_instances,
2741 struct adv_info, list))
2742 return list_first_entry(&hdev->adv_instances,
2743 struct adv_info, list);
2744 else
2745 return list_next_entry(cur_instance, list);
2746}
2747
2748/* This function requires the caller holds hdev->lock */
2749int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2750{
2751 struct adv_info *adv_instance;
2752
2753 adv_instance = hci_find_adv_instance(hdev, instance);
2754 if (!adv_instance)
2755 return -ENOENT;
2756
2757 BT_DBG("%s removing %dMR", hdev->name, instance);
2758
2759 if (hdev->cur_adv_instance == instance) {
2760 if (hdev->adv_instance_timeout) {
2761 cancel_delayed_work(&hdev->adv_instance_expire);
2762 hdev->adv_instance_timeout = 0;
2763 }
2764 hdev->cur_adv_instance = 0x00;
2765 }
2766
2767 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2768
2769 list_del(&adv_instance->list);
2770 kfree(adv_instance);
2771
2772 hdev->adv_instance_cnt--;
2773
2774 return 0;
2775}
2776
2777void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2778{
2779 struct adv_info *adv_instance, *n;
2780
2781 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2782 adv_instance->rpa_expired = rpa_expired;
2783}
2784
2785/* This function requires the caller holds hdev->lock */
2786void hci_adv_instances_clear(struct hci_dev *hdev)
2787{
2788 struct adv_info *adv_instance, *n;
2789
2790 if (hdev->adv_instance_timeout) {
2791 cancel_delayed_work(&hdev->adv_instance_expire);
2792 hdev->adv_instance_timeout = 0;
2793 }
2794
2795 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2796 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2797 list_del(&adv_instance->list);
2798 kfree(adv_instance);
2799 }
2800
2801 hdev->adv_instance_cnt = 0;
2802 hdev->cur_adv_instance = 0x00;
2803}
2804
2805static void adv_instance_rpa_expired(struct work_struct *work)
2806{
2807 struct adv_info *adv_instance = container_of(work, struct adv_info,
2808 rpa_expired_cb.work);
2809
2810 BT_DBG("");
2811
2812 adv_instance->rpa_expired = true;
2813}
2814
2815/* This function requires the caller holds hdev->lock */
2816int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2817 u16 adv_data_len, u8 *adv_data,
2818 u16 scan_rsp_len, u8 *scan_rsp_data,
2819 u16 timeout, u16 duration)
2820{
2821 struct adv_info *adv_instance;
2822
2823 adv_instance = hci_find_adv_instance(hdev, instance);
2824 if (adv_instance) {
2825 memset(adv_instance->adv_data, 0,
2826 sizeof(adv_instance->adv_data));
2827 memset(adv_instance->scan_rsp_data, 0,
2828 sizeof(adv_instance->scan_rsp_data));
2829 } else {
2830 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2831 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2832 return -EOVERFLOW;
2833
2834 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2835 if (!adv_instance)
2836 return -ENOMEM;
2837
2838 adv_instance->pending = true;
2839 adv_instance->instance = instance;
2840 list_add(&adv_instance->list, &hdev->adv_instances);
2841 hdev->adv_instance_cnt++;
2842 }
2843
2844 adv_instance->flags = flags;
2845 adv_instance->adv_data_len = adv_data_len;
2846 adv_instance->scan_rsp_len = scan_rsp_len;
2847
2848 if (adv_data_len)
2849 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2850
2851 if (scan_rsp_len)
2852 memcpy(adv_instance->scan_rsp_data,
2853 scan_rsp_data, scan_rsp_len);
2854
2855 adv_instance->timeout = timeout;
2856 adv_instance->remaining_time = timeout;
2857
2858 if (duration == 0)
2859 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2860 else
2861 adv_instance->duration = duration;
2862
2863 adv_instance->tx_power = HCI_TX_POWER_INVALID;
2864
2865 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2866 adv_instance_rpa_expired);
2867
2868 BT_DBG("%s for %dMR", hdev->name, instance);
2869
2870 return 0;
2871}
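
/* Usage sketch (hypothetical caller, holding hdev->lock): register a
 * connectable instance carrying only the Flags AD element, with a
 * 60 second timeout and the default per-instance duration:
 *
 *	u8 adv_data[] = { 0x02, 0x01, 0x06 };
 *
 *	err = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
 *			sizeof(adv_data), adv_data, 0, NULL, 60, 0);
 */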
2872
2873struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2874 bdaddr_t *bdaddr, u8 type)
2875{
2876 struct bdaddr_list *b;
2877
2878 list_for_each_entry(b, bdaddr_list, list) {
2879 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2880 return b;
2881 }
2882
2883 return NULL;
2884}
2885
2886struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2887 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2888 u8 type)
2889{
2890 struct bdaddr_list_with_irk *b;
2891
2892 list_for_each_entry(b, bdaddr_list, list) {
2893 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2894 return b;
2895 }
2896
2897 return NULL;
2898}
2899
2900void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2901{
2902 struct bdaddr_list *b, *n;
2903
2904 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2905 list_del(&b->list);
2906 kfree(b);
2907 }
2908}
2909
2910int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2911{
2912 struct bdaddr_list *entry;
2913
2914 if (!bacmp(bdaddr, BDADDR_ANY))
2915 return -EBADF;
2916
2917 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2918 return -EEXIST;
2919
2920 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2921 if (!entry)
2922 return -ENOMEM;
2923
2924 bacpy(&entry->bdaddr, bdaddr);
2925 entry->bdaddr_type = type;
2926
2927 list_add(&entry->list, list);
2928
2929 return 0;
2930}
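
/* Usage sketch (hypothetical caller): callers typically treat -EEXIST
 * from a duplicate entry as success:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err < 0 && err != -EEXIST)
 *		bt_dev_err(hdev, "failed to add address (%d)", err);
 */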
2931
2932int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2933 u8 type, u8 *peer_irk, u8 *local_irk)
2934{
2935 struct bdaddr_list_with_irk *entry;
2936
2937 if (!bacmp(bdaddr, BDADDR_ANY))
2938 return -EBADF;
2939
2940 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2941 return -EEXIST;
2942
2943 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2944 if (!entry)
2945 return -ENOMEM;
2946
2947 bacpy(&entry->bdaddr, bdaddr);
2948 entry->bdaddr_type = type;
2949
2950 if (peer_irk)
2951 memcpy(entry->peer_irk, peer_irk, 16);
2952
2953 if (local_irk)
2954 memcpy(entry->local_irk, local_irk, 16);
2955
2956 list_add(&entry->list, list);
2957
2958 return 0;
2959}
2960
2961int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2962{
2963 struct bdaddr_list *entry;
2964
2965 if (!bacmp(bdaddr, BDADDR_ANY)) {
2966 hci_bdaddr_list_clear(list);
2967 return 0;
2968 }
2969
2970 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2971 if (!entry)
2972 return -ENOENT;
2973
2974 list_del(&entry->list);
2975 kfree(entry);
2976
2977 return 0;
2978}
2979
2980int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2981 u8 type)
2982{
2983 struct bdaddr_list_with_irk *entry;
2984
2985 if (!bacmp(bdaddr, BDADDR_ANY)) {
2986 hci_bdaddr_list_clear(list);
2987 return 0;
2988 }
2989
2990 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2991 if (!entry)
2992 return -ENOENT;
2993
2994 list_del(&entry->list);
2995 kfree(entry);
2996
2997 return 0;
2998}
2999
3000/* This function requires the caller holds hdev->lock */
3001struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3002 bdaddr_t *addr, u8 addr_type)
3003{
3004 struct hci_conn_params *params;
3005
3006 list_for_each_entry(params, &hdev->le_conn_params, list) {
3007 if (bacmp(&params->addr, addr) == 0 &&
3008 params->addr_type == addr_type) {
3009 return params;
3010 }
3011 }
3012
3013 return NULL;
3014}
3015
3016/* This function requires the caller holds hdev->lock */
3017struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3018 bdaddr_t *addr, u8 addr_type)
3019{
3020 struct hci_conn_params *param;
3021
3022 list_for_each_entry(param, list, action) {
3023 if (bacmp(&param->addr, addr) == 0 &&
3024 param->addr_type == addr_type)
3025 return param;
3026 }
3027
3028 return NULL;
3029}
3030
3031/* This function requires the caller holds hdev->lock */
3032struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3033 bdaddr_t *addr, u8 addr_type)
3034{
3035 struct hci_conn_params *params;
3036
3037 params = hci_conn_params_lookup(hdev, addr, addr_type);
3038 if (params)
3039 return params;
3040
3041 params = kzalloc(sizeof(*params), GFP_KERNEL);
3042 if (!params) {
3043 bt_dev_err(hdev, "out of memory");
3044 return NULL;
3045 }
3046
3047 bacpy(&params->addr, addr);
3048 params->addr_type = addr_type;
3049
3050 list_add(&params->list, &hdev->le_conn_params);
3051 INIT_LIST_HEAD(&params->action);
3052
3053 params->conn_min_interval = hdev->le_conn_min_interval;
3054 params->conn_max_interval = hdev->le_conn_max_interval;
3055 params->conn_latency = hdev->le_conn_latency;
3056 params->supervision_timeout = hdev->le_supv_timeout;
3057 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3058
3059 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3060
3061 return params;
3062}
3063
3064static void hci_conn_params_free(struct hci_conn_params *params)
3065{
3066 if (params->conn) {
3067 hci_conn_drop(params->conn);
3068 hci_conn_put(params->conn);
3069 }
3070
3071 list_del(&params->action);
3072 list_del(&params->list);
3073 kfree(params);
3074}
3075
3076/* This function requires the caller holds hdev->lock */
3077void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3078{
3079 struct hci_conn_params *params;
3080
3081 params = hci_conn_params_lookup(hdev, addr, addr_type);
3082 if (!params)
3083 return;
3084
3085 hci_conn_params_free(params);
3086
3087 hci_update_background_scan(hdev);
3088
3089 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3090}
3091
3092/* This function requires the caller holds hdev->lock */
3093void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3094{
3095 struct hci_conn_params *params, *tmp;
3096
3097 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3098 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3099 continue;
3100
3101 /* If trying to establish a one-time connection to a disabled
3102 * device, leave the params, but mark them as just once.
3103 */
3104 if (params->explicit_connect) {
3105 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3106 continue;
3107 }
3108
3109 list_del(&params->list);
3110 kfree(params);
3111 }
3112
3113 BT_DBG("All LE disabled connection parameters were removed");
3114}
3115
3116/* This function requires the caller holds hdev->lock */
3117static void hci_conn_params_clear_all(struct hci_dev *hdev)
3118{
3119 struct hci_conn_params *params, *tmp;
3120
3121 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3122 hci_conn_params_free(params);
3123
3124 BT_DBG("All LE connection parameters were removed");
3125}
3126
3127/* Copy the Identity Address of the controller.
3128 *
3129 * If the controller has a public BD_ADDR, then by default use that one.
3130 * If this is an LE-only controller without a public address, default to
3131 * the static random address.
3132 *
3133 * For debugging purposes it is possible to force controllers with a
3134 * public address to use the static random address instead.
3135 *
3136 * In case BR/EDR has been disabled on a dual-mode controller and
3137 * userspace has configured a static address, then that address
3138 * becomes the identity address instead of the public BR/EDR address.
3139 */
3140void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3141 u8 *bdaddr_type)
3142{
3143 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3144 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3145 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3146 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3147 bacpy(bdaddr, &hdev->static_addr);
3148 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3149 } else {
3150 bacpy(bdaddr, &hdev->bdaddr);
3151 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3152 }
3153}
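
/* Usage sketch (hypothetical caller): resolve the own address before
 * building LE advertising or connection parameters:
 *
 *	bdaddr_t own_addr;
 *	u8 own_addr_type;
 *
 *	hci_copy_identity_address(hdev, &own_addr, &own_addr_type);
 *	BT_DBG("identity %pMR (type %u)", &own_addr, own_addr_type);
 */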
3154
3155/* Alloc HCI device */
3156struct hci_dev *hci_alloc_dev(void)
3157{
3158 struct hci_dev *hdev;
3159
3160 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3161 if (!hdev)
3162 return NULL;
3163
3164 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3165 hdev->esco_type = (ESCO_HV1);
3166 hdev->link_mode = (HCI_LM_ACCEPT);
3167 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3168 hdev->io_capability = 0x03; /* No Input No Output */
3169 hdev->manufacturer = 0xffff; /* Default to internal use */
3170 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3171 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3172 hdev->adv_instance_cnt = 0;
3173 hdev->cur_adv_instance = 0x00;
3174 hdev->adv_instance_timeout = 0;
3175
3176 hdev->sniff_max_interval = 800;
3177 hdev->sniff_min_interval = 80;
3178
3179 hdev->le_adv_channel_map = 0x07;
3180 hdev->le_adv_min_interval = 0x0800;
3181 hdev->le_adv_max_interval = 0x0800;
3182 hdev->le_scan_interval = 0x0060;
3183 hdev->le_scan_window = 0x0030;
3184 hdev->le_conn_min_interval = 0x0018;
3185 hdev->le_conn_max_interval = 0x0028;
3186 hdev->le_conn_latency = 0x0000;
3187 hdev->le_supv_timeout = 0x002a;
3188 hdev->le_def_tx_len = 0x001b;
3189 hdev->le_def_tx_time = 0x0148;
3190 hdev->le_max_tx_len = 0x001b;
3191 hdev->le_max_tx_time = 0x0148;
3192 hdev->le_max_rx_len = 0x001b;
3193 hdev->le_max_rx_time = 0x0148;
3194 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3195 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3196 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3197 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3198 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3199
3200 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3201 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3205 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3206
3207 mutex_init(&hdev->lock);
3208 mutex_init(&hdev->req_lock);
3209
3210 INIT_LIST_HEAD(&hdev->mgmt_pending);
3211 INIT_LIST_HEAD(&hdev->blacklist);
3212 INIT_LIST_HEAD(&hdev->whitelist);
3213 INIT_LIST_HEAD(&hdev->uuids);
3214 INIT_LIST_HEAD(&hdev->link_keys);
3215 INIT_LIST_HEAD(&hdev->long_term_keys);
3216 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3217 INIT_LIST_HEAD(&hdev->remote_oob_data);
3218 INIT_LIST_HEAD(&hdev->le_white_list);
3219 INIT_LIST_HEAD(&hdev->le_resolv_list);
3220 INIT_LIST_HEAD(&hdev->le_conn_params);
3221 INIT_LIST_HEAD(&hdev->pend_le_conns);
3222 INIT_LIST_HEAD(&hdev->pend_le_reports);
3223 INIT_LIST_HEAD(&hdev->conn_hash.list);
3224 INIT_LIST_HEAD(&hdev->adv_instances);
3225
3226 INIT_WORK(&hdev->rx_work, hci_rx_work);
3227 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3228 INIT_WORK(&hdev->tx_work, hci_tx_work);
3229 INIT_WORK(&hdev->power_on, hci_power_on);
3230 INIT_WORK(&hdev->error_reset, hci_error_reset);
3231
3232 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3233
3234 skb_queue_head_init(&hdev->rx_q);
3235 skb_queue_head_init(&hdev->cmd_q);
3236 skb_queue_head_init(&hdev->raw_q);
3237
3238 init_waitqueue_head(&hdev->req_wait_q);
3239
3240 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3241
3242 hci_request_setup(hdev);
3243
3244 hci_init_sysfs(hdev);
3245 discovery_init(hdev);
3246
3247 return hdev;
3248}
3249EXPORT_SYMBOL(hci_alloc_dev);
3250
3251/* Free HCI device */
3252void hci_free_dev(struct hci_dev *hdev)
3253{
3254 /* will free via device release */
3255 put_device(&hdev->dev);
3256}
3257EXPORT_SYMBOL(hci_free_dev);
3258
3259/* Register HCI device */
3260int hci_register_dev(struct hci_dev *hdev)
3261{
3262 int id, error;
3263
3264 if (!hdev->open || !hdev->close || !hdev->send)
3265 return -EINVAL;
3266
3267 /* Do not allow HCI_AMP devices to register at index 0,
3268 * so the index can be used as the AMP controller ID.
3269 */
3270 switch (hdev->dev_type) {
3271 case HCI_PRIMARY:
3272 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3273 break;
3274 case HCI_AMP:
3275 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3276 break;
3277 default:
3278 return -EINVAL;
3279 }
3280
3281 if (id < 0)
3282 return id;
3283
3284 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3285 hdev->id = id;
3286
3287 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3288
3289 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3290 if (!hdev->workqueue) {
3291 error = -ENOMEM;
3292 goto err;
3293 }
3294
3295 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3296 hdev->name);
3297 if (!hdev->req_workqueue) {
3298 destroy_workqueue(hdev->workqueue);
3299 error = -ENOMEM;
3300 goto err;
3301 }
3302
3303 if (!IS_ERR_OR_NULL(bt_debugfs))
3304 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3305
3306 dev_set_name(&hdev->dev, "%s", hdev->name);
3307
3308 error = device_add(&hdev->dev);
3309 if (error < 0)
3310 goto err_wqueue;
3311
3312 hci_leds_init(hdev);
3313
3314 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3315 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3316 hdev);
3317 if (hdev->rfkill) {
3318 if (rfkill_register(hdev->rfkill) < 0) {
3319 rfkill_destroy(hdev->rfkill);
3320 hdev->rfkill = NULL;
3321 }
3322 }
3323
3324 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3325 hci_dev_set_flag(hdev, HCI_RFKILLED);
3326
3327 hci_dev_set_flag(hdev, HCI_SETUP);
3328 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3329
3330 if (hdev->dev_type == HCI_PRIMARY) {
3331 /* Assume BR/EDR support until proven otherwise (such as
3332 * through reading supported features during init).
3333 */
3334 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3335 }
3336
3337 write_lock(&hci_dev_list_lock);
3338 list_add(&hdev->list, &hci_dev_list);
3339 write_unlock(&hci_dev_list_lock);
3340
3341 /* Devices that are marked for raw-only usage are unconfigured
3342 * and should not be included in normal operation.
3343 */
3344 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3345 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3346
3347 hci_sock_dev_event(hdev, HCI_DEV_REG);
3348 hci_dev_hold(hdev);
3349
3350 queue_work(hdev->req_workqueue, &hdev->power_on);
3351
3352 return id;
3353
3354err_wqueue:
3355 destroy_workqueue(hdev->workqueue);
3356 destroy_workqueue(hdev->req_workqueue);
3357err:
3358 ida_simple_remove(&hci_index_ida, hdev->id);
3359
3360 return error;
3361}
3362EXPORT_SYMBOL(hci_register_dev);
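
/* Driver-side sketch (hypothetical transport driver): registration is
 * rejected unless the open, close and send callbacks are provided, so
 * a minimal driver looks like:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;	(hypothetical callbacks)
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */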
3363
3364/* Unregister HCI device */
3365void hci_unregister_dev(struct hci_dev *hdev)
3366{
3367 int id;
3368
3369 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3370
3371 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3372
3373 id = hdev->id;
3374
3375 write_lock(&hci_dev_list_lock);
3376 list_del(&hdev->list);
3377 write_unlock(&hci_dev_list_lock);
3378
3379 cancel_work_sync(&hdev->power_on);
3380
3381 hci_dev_do_close(hdev);
3382
3383 if (!test_bit(HCI_INIT, &hdev->flags) &&
3384 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3385 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3386 hci_dev_lock(hdev);
3387 mgmt_index_removed(hdev);
3388 hci_dev_unlock(hdev);
3389 }
3390
3391 /* mgmt_index_removed should take care of emptying the
3392 * pending list */
3393 BUG_ON(!list_empty(&hdev->mgmt_pending));
3394
3395 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3396
3397 if (hdev->rfkill) {
3398 rfkill_unregister(hdev->rfkill);
3399 rfkill_destroy(hdev->rfkill);
3400 }
3401
3402 device_del(&hdev->dev);
3403
3404 debugfs_remove_recursive(hdev->debugfs);
3405 kfree_const(hdev->hw_info);
3406 kfree_const(hdev->fw_info);
3407
3408 destroy_workqueue(hdev->workqueue);
3409 destroy_workqueue(hdev->req_workqueue);
3410
3411 hci_dev_lock(hdev);
3412 hci_bdaddr_list_clear(&hdev->blacklist);
3413 hci_bdaddr_list_clear(&hdev->whitelist);
3414 hci_uuids_clear(hdev);
3415 hci_link_keys_clear(hdev);
3416 hci_smp_ltks_clear(hdev);
3417 hci_smp_irks_clear(hdev);
3418 hci_remote_oob_data_clear(hdev);
3419 hci_adv_instances_clear(hdev);
3420 hci_bdaddr_list_clear(&hdev->le_white_list);
3421 hci_bdaddr_list_clear(&hdev->le_resolv_list);
3422 hci_conn_params_clear_all(hdev);
3423 hci_discovery_filter_clear(hdev);
3424 hci_dev_unlock(hdev);
3425
3426 hci_dev_put(hdev);
3427
3428 ida_simple_remove(&hci_index_ida, id);
3429}
3430EXPORT_SYMBOL(hci_unregister_dev);
3431
3432/* Suspend HCI device */
3433int hci_suspend_dev(struct hci_dev *hdev)
3434{
3435 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3436 return 0;
3437}
3438EXPORT_SYMBOL(hci_suspend_dev);
3439
3440/* Resume HCI device */
3441int hci_resume_dev(struct hci_dev *hdev)
3442{
3443 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3444 return 0;
3445}
3446EXPORT_SYMBOL(hci_resume_dev);
3447
3448/* Reset HCI device */
3449int hci_reset_dev(struct hci_dev *hdev)
3450{
3451 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3452 struct sk_buff *skb;
3453
3454 skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
3455 if (!skb)
3456 return -ENOMEM;
3457
3458 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3459 skb_put_data(skb, hw_err, sizeof(hw_err));
3460
3461 /* Send Hardware Error to upper stack */
3462 return hci_recv_frame(hdev, skb);
3463}
3464EXPORT_SYMBOL(hci_reset_dev);
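
/* Usage sketch (hypothetical driver error path): injecting the
 * Hardware Error event makes the core close and re-open the device
 * via the error_reset work:
 *
 *	if (firmware_crashed)	(hypothetical condition)
 *		hci_reset_dev(hdev);
 */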
3465
3466/* Receive frame from HCI drivers */
3467int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3468{
3469 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3470 !test_bit(HCI_INIT, &hdev->flags))) {
3471 kfree_skb(skb);
3472 return -ENXIO;
3473 }
3474
3475 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3476 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3477 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3478 kfree_skb(skb);
3479 return -EINVAL;
3480 }
3481
3482 /* Incoming skb */
3483 bt_cb(skb)->incoming = 1;
3484
3485 /* Time stamp */
3486 __net_timestamp(skb);
3487
3488 skb_queue_tail(&hdev->rx_q, skb);
3489 queue_work(hdev->workqueue, &hdev->rx_work);
3490
3491 return 0;
3492}
3493EXPORT_SYMBOL(hci_recv_frame);
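
/* Driver-side sketch (hypothetical): wrap bytes received from the
 * transport in an skb, tag the packet type and hand it to the core:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	return hci_recv_frame(hdev, skb);
 */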
3494
3495/* Receive diagnostic message from HCI drivers */
3496int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3497{
3498 /* Mark as diagnostic packet */
3499 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3500
3501 /* Time stamp */
3502 __net_timestamp(skb);
3503
3504 skb_queue_tail(&hdev->rx_q, skb);
3505 queue_work(hdev->workqueue, &hdev->rx_work);
3506
3507 return 0;
3508}
3509EXPORT_SYMBOL(hci_recv_diag);
3510
3511void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3512{
3513 va_list vargs;
3514
3515 va_start(vargs, fmt);
3516 kfree_const(hdev->hw_info);
3517 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3518 va_end(vargs);
3519}
3520EXPORT_SYMBOL(hci_set_hw_info);
3521
3522void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3523{
3524 va_list vargs;
3525
3526 va_start(vargs, fmt);
3527 kfree_const(hdev->fw_info);
3528 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3529 va_end(vargs);
3530}
3531EXPORT_SYMBOL(hci_set_fw_info);
3532
3533/* ---- Interface to upper protocols ---- */
3534
3535int hci_register_cb(struct hci_cb *cb)
3536{
3537 BT_DBG("%p name %s", cb, cb->name);
3538
3539 mutex_lock(&hci_cb_list_lock);
3540 list_add_tail(&cb->list, &hci_cb_list);
3541 mutex_unlock(&hci_cb_list_lock);
3542
3543 return 0;
3544}
3545EXPORT_SYMBOL(hci_register_cb);
3546
3547int hci_unregister_cb(struct hci_cb *cb)
3548{
3549 BT_DBG("%p name %s", cb, cb->name);
3550
3551 mutex_lock(&hci_cb_list_lock);
3552 list_del(&cb->list);
3553 mutex_unlock(&hci_cb_list_lock);
3554
3555 return 0;
3556}
3557EXPORT_SYMBOL(hci_unregister_cb);
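
/* Sketch of an upper-protocol user (hypothetical, modeled on how
 * L2CAP and SCO hook themselves in):
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);	(on module init)
 *	hci_unregister_cb(&my_cb);	(on module exit)
 */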
3558
3559static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3560{
3561 int err;
3562
3563 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3564 skb->len);
3565
3566 /* Time stamp */
3567 __net_timestamp(skb);
3568
3569 /* Send copy to monitor */
3570 hci_send_to_monitor(hdev, skb);
3571
3572 if (atomic_read(&hdev->promisc)) {
3573 /* Send copy to the sockets */
3574 hci_send_to_sock(hdev, skb);
3575 }
3576
3577 /* Get rid of skb owner, prior to sending to the driver. */
3578 skb_orphan(skb);
3579
3580 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3581 kfree_skb(skb);
3582 return;
3583 }
3584
3585 err = hdev->send(hdev, skb);
3586 if (err < 0) {
3587 bt_dev_err(hdev, "sending frame failed (%d)", err);
3588 kfree_skb(skb);
3589 }
3590}
3591
3592/* Send HCI command */
3593int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3594 const void *param)
3595{
3596 struct sk_buff *skb;
3597
3598 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3599
3600 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3601 if (!skb) {
3602 bt_dev_err(hdev, "no memory for command");
3603 return -ENOMEM;
3604 }
3605
3606 /* Stand-alone HCI commands must be flagged as
3607 * single-command requests.
3608 */
3609 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3610
3611 skb_queue_tail(&hdev->cmd_q, skb);
3612 queue_work(hdev->workqueue, &hdev->cmd_work);
3613
3614 return 0;
3615}
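
/* Usage sketch (hypothetical): queue a Write Local Name command
 * without waiting for its completion:
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, "example", 7);
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 */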
3616
3617int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3618 const void *param)
3619{
3620 struct sk_buff *skb;
3621
3622 if (hci_opcode_ogf(opcode) != 0x3f) {
3623 /* A controller receiving a command shall respond with either
3624 * a Command Status Event or a Command Complete Event.
3625 * Therefore, all standard HCI commands must be sent via the
3626 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3627 * Some vendors do not comply with this rule for vendor-specific
3628 * commands and do not return any event. We want to support
3629 * unresponded commands for such cases only.
3630 */
3631 bt_dev_err(hdev, "unresponded command not supported");
3632 return -EINVAL;
3633 }
3634
3635 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3636 if (!skb) {
3637 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3638 opcode);
3639 return -ENOMEM;
3640 }
3641
3642 hci_send_frame(hdev, skb);
3643
3644 return 0;
3645}
3646EXPORT_SYMBOL(__hci_cmd_send);
3647
3648/* Get data from the previously sent command */
3649void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3650{
3651 struct hci_command_hdr *hdr;
3652
3653 if (!hdev->sent_cmd)
3654 return NULL;
3655
3656 hdr = (void *) hdev->sent_cmd->data;
3657
3658 if (hdr->opcode != cpu_to_le16(opcode))
3659 return NULL;
3660
3661 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3662
3663 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3664}
3665
3666 /* Send HCI command and wait for the Command Complete event */
3667struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3668 const void *param, u32 timeout)
3669{
3670 struct sk_buff *skb;
3671
3672 if (!test_bit(HCI_UP, &hdev->flags))
3673 return ERR_PTR(-ENETDOWN);
3674
3675 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3676
3677 hci_req_sync_lock(hdev);
3678 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3679 hci_req_sync_unlock(hdev);
3680
3681 return skb;
3682}
3683EXPORT_SYMBOL(hci_cmd_sync);
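
/* Usage sketch (hypothetical): read the controller's BD_ADDR
 * synchronously; on success the returned skb carries the Command
 * Complete parameters and must be freed by the caller:
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	rp = (struct hci_rp_read_bd_addr *)skb->data;
 *	kfree_skb(skb);
 */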
3684
3685/* Send ACL data */
3686static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3687{
3688 struct hci_acl_hdr *hdr;
3689 int len = skb->len;
3690
3691 skb_push(skb, HCI_ACL_HDR_SIZE);
3692 skb_reset_transport_header(skb);
3693 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3694 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3695 hdr->dlen = cpu_to_le16(len);
3696}
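
/* The value packed above carries the 12-bit connection handle in bits
 * 0-11 and the PB/BC flags in bits 12-15; for example
 * hci_handle_pack(0x002a, ACL_START) yields 0x202a, since ACL_START
 * is 0x02.
 */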
3697
3698static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3699 struct sk_buff *skb, __u16 flags)
3700{
3701 struct hci_conn *conn = chan->conn;
3702 struct hci_dev *hdev = conn->hdev;
3703 struct sk_buff *list;
3704
3705 skb->len = skb_headlen(skb);
3706 skb->data_len = 0;
3707
3708 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3709
3710 switch (hdev->dev_type) {
3711 case HCI_PRIMARY:
3712 hci_add_acl_hdr(skb, conn->handle, flags);
3713 break;
3714 case HCI_AMP:
3715 hci_add_acl_hdr(skb, chan->handle, flags);
3716 break;
3717 default:
3718 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3719 return;
3720 }
3721
3722 list = skb_shinfo(skb)->frag_list;
3723 if (!list) {
3724 /* Non fragmented */
3725 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3726
3727 skb_queue_tail(queue, skb);
3728 } else {
3729 /* Fragmented */
3730 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3731
3732 skb_shinfo(skb)->frag_list = NULL;
3733
3734 /* Queue all fragments atomically. We need to use spin_lock_bh
3735 * here because of 6LoWPAN links, as there this function is
3736 * called from softirq and using normal spin lock could cause
3737 * deadlocks.
3738 */
3739 spin_lock_bh(&queue->lock);
3740
3741 __skb_queue_tail(queue, skb);
3742
3743 flags &= ~ACL_START;
3744 flags |= ACL_CONT;
3745 do {
3746 skb = list; list = list->next;
3747
3748 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3749 hci_add_acl_hdr(skb, conn->handle, flags);
3750
3751 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3752
3753 __skb_queue_tail(queue, skb);
3754 } while (list);
3755
3756 spin_unlock_bh(&queue->lock);
3757 }
3758}
3759
3760void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3761{
3762 struct hci_dev *hdev = chan->conn->hdev;
3763
3764 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3765
3766 hci_queue_acl(chan, &chan->data_q, skb, flags);
3767
3768 queue_work(hdev->workqueue, &hdev->tx_work);
3769}
3770
3771/* Send SCO data */
3772void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3773{
3774 struct hci_dev *hdev = conn->hdev;
3775 struct hci_sco_hdr hdr;
3776
3777 BT_DBG("%s len %d", hdev->name, skb->len);
3778
3779 hdr.handle = cpu_to_le16(conn->handle);
3780 hdr.dlen = skb->len;
3781
3782 skb_push(skb, HCI_SCO_HDR_SIZE);
3783 skb_reset_transport_header(skb);
3784 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3785
3786 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3787
3788 skb_queue_tail(&conn->data_q, skb);
3789 queue_work(hdev->workqueue, &hdev->tx_work);
3790}
3791
3792/* ---- HCI TX task (outgoing data) ---- */
3793
3794/* HCI Connection scheduler */
3795static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3796 int *quote)
3797{
3798 struct hci_conn_hash *h = &hdev->conn_hash;
3799 struct hci_conn *conn = NULL, *c;
3800 unsigned int num = 0, min = ~0;
3801
3802 /* We don't have to lock device here. Connections are always
3803 * added and removed with TX task disabled. */
3804
3805 rcu_read_lock();
3806
3807 list_for_each_entry_rcu(c, &h->list, list) {
3808 if (c->type != type || skb_queue_empty(&c->data_q))
3809 continue;
3810
3811 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3812 continue;
3813
3814 num++;
3815
3816 if (c->sent < min) {
3817 min = c->sent;
3818 conn = c;
3819 }
3820
3821 if (hci_conn_num(hdev, type) == num)
3822 break;
3823 }
3824
3825 rcu_read_unlock();
3826
3827 if (conn) {
3828 int cnt, q;
3829
3830 switch (conn->type) {
3831 case ACL_LINK:
3832 cnt = hdev->acl_cnt;
3833 break;
3834 case SCO_LINK:
3835 case ESCO_LINK:
3836 cnt = hdev->sco_cnt;
3837 break;
3838 case LE_LINK:
3839 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3840 break;
3841 default:
3842 cnt = 0;
3843 bt_dev_err(hdev, "unknown link type %d", conn->type);
3844 }
3845
3846 q = cnt / num;
3847 *quote = q ? q : 1;
3848 } else
3849 *quote = 0;
3850
3851 BT_DBG("conn %p quote %d", conn, *quote);
3852 return conn;
3853}
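
/* Worked example of the quota computed above: with 8 free ACL buffers
 * (cnt == 8) shared by 3 busy ACL connections (num == 3), the selected
 * connection may send q = 8 / 3 = 2 packets this round; the
 * "q ? q : 1" fallback keeps traffic moving even when cnt < num.
 */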
3854
3855static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3856{
3857 struct hci_conn_hash *h = &hdev->conn_hash;
3858 struct hci_conn *c;
3859
3860 bt_dev_err(hdev, "link tx timeout");
3861
3862 rcu_read_lock();
3863
3864 /* Kill stalled connections */
3865 list_for_each_entry_rcu(c, &h->list, list) {
3866 if (c->type == type && c->sent) {
3867 bt_dev_err(hdev, "killing stalled connection %pMR",
3868 &c->dst);
3869 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3870 }
3871 }
3872
3873 rcu_read_unlock();
3874}
3875
3876static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3877 int *quote)
3878{
3879 struct hci_conn_hash *h = &hdev->conn_hash;
3880 struct hci_chan *chan = NULL;
3881 unsigned int num = 0, min = ~0, cur_prio = 0;
3882 struct hci_conn *conn;
3883 int cnt, q, conn_num = 0;
3884
3885 BT_DBG("%s", hdev->name);
3886
3887 rcu_read_lock();
3888
3889 list_for_each_entry_rcu(conn, &h->list, list) {
3890 struct hci_chan *tmp;
3891
3892 if (conn->type != type)
3893 continue;
3894
3895 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3896 continue;
3897
3898 conn_num++;
3899
3900 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3901 struct sk_buff *skb;
3902
3903 if (skb_queue_empty(&tmp->data_q))
3904 continue;
3905
3906 skb = skb_peek(&tmp->data_q);
3907 if (skb->priority < cur_prio)
3908 continue;
3909
3910 if (skb->priority > cur_prio) {
3911 num = 0;
3912 min = ~0;
3913 cur_prio = skb->priority;
3914 }
3915
3916 num++;
3917
3918 if (conn->sent < min) {
3919 min = conn->sent;
3920 chan = tmp;
3921 }
3922 }
3923
3924 if (hci_conn_num(hdev, type) == conn_num)
3925 break;
3926 }
3927
3928 rcu_read_unlock();
3929
3930 if (!chan)
3931 return NULL;
3932
3933 switch (chan->conn->type) {
3934 case ACL_LINK:
3935 cnt = hdev->acl_cnt;
3936 break;
3937 case AMP_LINK:
3938 cnt = hdev->block_cnt;
3939 break;
3940 case SCO_LINK:
3941 case ESCO_LINK:
3942 cnt = hdev->sco_cnt;
3943 break;
3944 case LE_LINK:
3945 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3946 break;
3947 default:
3948 cnt = 0;
3949 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3950 }
3951
3952 q = cnt / num;
3953 *quote = q ? q : 1;
3954 BT_DBG("chan %p quote %d", chan, *quote);
3955 return chan;
3956}
3957
3958static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3959{
3960 struct hci_conn_hash *h = &hdev->conn_hash;
3961 struct hci_conn *conn;
3962 int num = 0;
3963
3964 BT_DBG("%s", hdev->name);
3965
3966 rcu_read_lock();
3967
3968 list_for_each_entry_rcu(conn, &h->list, list) {
3969 struct hci_chan *chan;
3970
3971 if (conn->type != type)
3972 continue;
3973
3974 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3975 continue;
3976
3977 num++;
3978
3979 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3980 struct sk_buff *skb;
3981
3982 if (chan->sent) {
3983 chan->sent = 0;
3984 continue;
3985 }
3986
3987 if (skb_queue_empty(&chan->data_q))
3988 continue;
3989
3990 skb = skb_peek(&chan->data_q);
3991 if (skb->priority >= HCI_PRIO_MAX - 1)
3992 continue;
3993
3994 skb->priority = HCI_PRIO_MAX - 1;
3995
3996 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3997 skb->priority);
3998 }
3999
4000 if (hci_conn_num(hdev, type) == num)
4001 break;
4002 }
4003
4004 rcu_read_unlock();
4005
4006}
4007
4008static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4009{
4010 /* Calculate count of blocks used by this packet */
4011 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4012}
4013
4014static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4015{
4016 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4017 /* ACL tx timeout must be longer than maximum
4018 * link supervision timeout (40.9 seconds) */
4019 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4020 HCI_ACL_TX_TIMEOUT))
4021 hci_link_tx_to(hdev, ACL_LINK);
4022 }
4023}
4024
4025static void hci_sched_acl_pkt(struct hci_dev *hdev)
4026{
4027 unsigned int cnt = hdev->acl_cnt;
4028 struct hci_chan *chan;
4029 struct sk_buff *skb;
4030 int quote;
4031
4032 __check_timeout(hdev, cnt);
4033
4034 while (hdev->acl_cnt &&
4035 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4036 u32 priority = (skb_peek(&chan->data_q))->priority;
4037 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4038 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4039 skb->len, skb->priority);
4040
4041 /* Stop if priority has changed */
4042 if (skb->priority < priority)
4043 break;
4044
4045 skb = skb_dequeue(&chan->data_q);
4046
4047 hci_conn_enter_active_mode(chan->conn,
4048 bt_cb(skb)->force_active);
4049
4050 hci_send_frame(hdev, skb);
4051 hdev->acl_last_tx = jiffies;
4052
4053 hdev->acl_cnt--;
4054 chan->sent++;
4055 chan->conn->sent++;
4056 }
4057 }
4058
4059 if (cnt != hdev->acl_cnt)
4060 hci_prio_recalculate(hdev, ACL_LINK);
4061}
4062
4063static void hci_sched_acl_blk(struct hci_dev *hdev)
4064{
4065 unsigned int cnt = hdev->block_cnt;
4066 struct hci_chan *chan;
4067 struct sk_buff *skb;
4068 int quote;
4069 u8 type;
4070
4071 __check_timeout(hdev, cnt);
4072
4073 BT_DBG("%s", hdev->name);
4074
4075 if (hdev->dev_type == HCI_AMP)
4076 type = AMP_LINK;
4077 else
4078 type = ACL_LINK;
4079
4080 while (hdev->block_cnt > 0 &&
4081 (chan = hci_chan_sent(hdev, type, &quote))) {
4082 u32 priority = (skb_peek(&chan->data_q))->priority;
4083 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4084 int blocks;
4085
4086 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4087 skb->len, skb->priority);
4088
4089 /* Stop if priority has changed */
4090 if (skb->priority < priority)
4091 break;
4092
4093 skb = skb_dequeue(&chan->data_q);
4094
4095 blocks = __get_blocks(hdev, skb);
4096 if (blocks > hdev->block_cnt)
4097 return;
4098
4099 hci_conn_enter_active_mode(chan->conn,
4100 bt_cb(skb)->force_active);
4101
4102 hci_send_frame(hdev, skb);
4103 hdev->acl_last_tx = jiffies;
4104
4105 hdev->block_cnt -= blocks;
4106 quote -= blocks;
4107
4108 chan->sent += blocks;
4109 chan->conn->sent += blocks;
4110 }
4111 }
4112
4113 if (cnt != hdev->block_cnt)
4114 hci_prio_recalculate(hdev, type);
4115}
4116
4117static void hci_sched_acl(struct hci_dev *hdev)
4118{
4119 BT_DBG("%s", hdev->name);
4120
4121 /* No ACL link over BR/EDR controller */
4122 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4123 return;
4124
4125 /* No AMP link over AMP controller */
4126 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4127 return;
4128
4129 switch (hdev->flow_ctl_mode) {
4130 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4131 hci_sched_acl_pkt(hdev);
4132 break;
4133
4134 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4135 hci_sched_acl_blk(hdev);
4136 break;
4137 }
4138}
4139
4140/* Schedule SCO */
4141static void hci_sched_sco(struct hci_dev *hdev)
4142{
4143 struct hci_conn *conn;
4144 struct sk_buff *skb;
4145 int quote;
4146
4147 BT_DBG("%s", hdev->name);
4148
4149 if (!hci_conn_num(hdev, SCO_LINK))
4150 return;
4151
4152 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4153 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4154 BT_DBG("skb %p len %d", skb, skb->len);
4155 hci_send_frame(hdev, skb);
4156
4157 conn->sent++;
4158 if (conn->sent == ~0)
4159 conn->sent = 0;
4160 }
4161 }
4162}
4163
4164static void hci_sched_esco(struct hci_dev *hdev)
4165{
4166 struct hci_conn *conn;
4167 struct sk_buff *skb;
4168 int quote;
4169
4170 BT_DBG("%s", hdev->name);
4171
4172 if (!hci_conn_num(hdev, ESCO_LINK))
4173 return;
4174
4175 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4176						     &quote))) {
4177 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4178 BT_DBG("skb %p len %d", skb, skb->len);
4179 hci_send_frame(hdev, skb);
4180
4181 conn->sent++;
4182 if (conn->sent == ~0)
4183 conn->sent = 0;
4184 }
4185 }
4186}
4187
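/* LE scheduler. Controllers with a dedicated LE buffer pool report
 * le_pkts > 0 and are throttled through hdev->le_cnt; controllers
 * sharing a single pool fall back to the ACL credits, which is why the
 * remaining count is written back below to whichever counter was
 * borrowed from.
 */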
4188static void hci_sched_le(struct hci_dev *hdev)
4189{
4190 struct hci_chan *chan;
4191 struct sk_buff *skb;
4192 int quote, cnt, tmp;
4193
4194 BT_DBG("%s", hdev->name);
4195
4196 if (!hci_conn_num(hdev, LE_LINK))
4197 return;
4198
4199 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4200 /* LE tx timeout must be longer than maximum
4201 * link supervision timeout (40.9 seconds) */
4202 if (!hdev->le_cnt && hdev->le_pkts &&
4203 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4204 hci_link_tx_to(hdev, LE_LINK);
4205 }
4206
4207 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4208 tmp = cnt;
4209	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4210 u32 priority = (skb_peek(&chan->data_q))->priority;
4211 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4212 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4213 skb->len, skb->priority);
4214
4215 /* Stop if priority has changed */
4216 if (skb->priority < priority)
4217 break;
4218
4219 skb = skb_dequeue(&chan->data_q);
4220
4221 hci_send_frame(hdev, skb);
4222 hdev->le_last_tx = jiffies;
4223
4224 cnt--;
4225 chan->sent++;
4226 chan->conn->sent++;
4227 }
4228 }
4229
4230 if (hdev->le_pkts)
4231 hdev->le_cnt = cnt;
4232 else
4233 hdev->acl_cnt = cnt;
4234
4235 if (cnt != tmp)
4236 hci_prio_recalculate(hdev, LE_LINK);
4237}
4238
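/* TX worker: runs the per-link-type schedulers and then flushes raw_q,
 * which bypasses scheduling for unknown packet types. Credits flow back
 * asynchronously; a minimal sketch of the other half of the loop, as
 * done from the Number of Completed Packets event handler:
 *
 *	hdev->acl_cnt += completed;	// replenish credits
 *	queue_work(hdev->workqueue, &hdev->tx_work);
 */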
4239static void hci_tx_work(struct work_struct *work)
4240{
4241 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4242 struct sk_buff *skb;
4243
4244 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4245 hdev->sco_cnt, hdev->le_cnt);
4246
4247 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4248 /* Schedule queues and send stuff to HCI driver */
4249 hci_sched_acl(hdev);
4250 hci_sched_sco(hdev);
4251 hci_sched_esco(hdev);
4252 hci_sched_le(hdev);
4253 }
4254
4255 /* Send next queued raw (unknown type) packet */
4256 while ((skb = skb_dequeue(&hdev->raw_q)))
4257 hci_send_frame(hdev, skb);
4258}
4259
4260/* ----- HCI RX task (incoming data processing) ----- */
4261
4262/* ACL data packet */
4263static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4264{
4265 struct hci_acl_hdr *hdr = (void *) skb->data;
4266 struct hci_conn *conn;
4267 __u16 handle, flags;
4268
4269 skb_pull(skb, HCI_ACL_HDR_SIZE);
4270
4271 handle = __le16_to_cpu(hdr->handle);
4272 flags = hci_flags(handle);
4273 handle = hci_handle(handle);
4274
4275 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4276 handle, flags);
4277
4278 hdev->stat.acl_rx++;
4279
4280 hci_dev_lock(hdev);
4281 conn = hci_conn_hash_lookup_handle(hdev, handle);
4282 hci_dev_unlock(hdev);
4283
4284 if (conn) {
4285 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4286
4287 /* Send to upper protocol */
4288 l2cap_recv_acldata(conn, skb, flags);
4289 return;
4290 } else {
4291 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4292 handle);
4293 }
4294
4295 kfree_skb(skb);
4296}
4297
4298/* SCO data packet */
4299static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4300{
4301 struct hci_sco_hdr *hdr = (void *) skb->data;
4302 struct hci_conn *conn;
4303 __u16 handle;
4304
4305 skb_pull(skb, HCI_SCO_HDR_SIZE);
4306
4307 handle = __le16_to_cpu(hdr->handle);
4308
4309 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4310
4311 hdev->stat.sco_rx++;
4312
4313 hci_dev_lock(hdev);
4314 conn = hci_conn_hash_lookup_handle(hdev, handle);
4315 hci_dev_unlock(hdev);
4316
4317 if (conn) {
4318 /* Send to upper protocol */
4319 sco_recv_scodata(conn, skb);
4320 return;
4321 } else {
4322 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4323 handle);
4324 }
4325
4326 kfree_skb(skb);
4327}
4328
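/* Commands are framed into requests by tagging the first command of
 * each request with HCI_REQ_START, so a queue whose head carries that
 * tag (or an empty queue) means the previous request has been fully
 * consumed. Illustrative layout of hdev->cmd_q:
 *
 *	[A: HCI_REQ_START][B][C][D: HCI_REQ_START][E]...
 *	 \_____ request 1 _____/ \___ request 2 ___/
 */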
4329static bool hci_req_is_complete(struct hci_dev *hdev)
4330{
4331 struct sk_buff *skb;
4332
4333 skb = skb_peek(&hdev->cmd_q);
4334 if (!skb)
4335 return true;
4336
4337 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4338}
4339
4340static void hci_resend_last(struct hci_dev *hdev)
4341{
4342 struct hci_command_hdr *sent;
4343 struct sk_buff *skb;
4344 u16 opcode;
4345
4346 if (!hdev->sent_cmd)
4347 return;
4348
4349 sent = (void *) hdev->sent_cmd->data;
4350 opcode = __le16_to_cpu(sent->opcode);
4351 if (opcode == HCI_OP_RESET)
4352 return;
4353
4354 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4355 if (!skb)
4356 return;
4357
4358 skb_queue_head(&hdev->cmd_q, skb);
4359 queue_work(hdev->workqueue, &hdev->cmd_work);
4360}
4361
4362void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4363 hci_req_complete_t *req_complete,
4364 hci_req_complete_skb_t *req_complete_skb)
4365{
4366 struct sk_buff *skb;
4367 unsigned long flags;
4368
4369 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4370
4371	/* If the completed command doesn't match the last one that was
4372	 * sent, we need to handle it specially.
4373	 */
4374 if (!hci_sent_cmd_data(hdev, opcode)) {
4375 /* Some CSR based controllers generate a spontaneous
4376 * reset complete event during init and any pending
4377 * command will never be completed. In such a case we
4378 * need to resend whatever was the last sent
4379 * command.
4380 */
4381 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4382 hci_resend_last(hdev);
4383
4384 return;
4385 }
4386
4387 /* If we reach this point this event matches the last command sent */
4388 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4389
4390 /* If the command succeeded and there's still more commands in
4391 * this request the request is not yet complete.
4392 */
4393 if (!status && !hci_req_is_complete(hdev))
4394 return;
4395
4396 /* If this was the last command in a request the complete
4397 * callback would be found in hdev->sent_cmd instead of the
4398 * command queue (hdev->cmd_q).
4399 */
4400 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4401 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4402 return;
4403 }
4404
4405 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4406 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4407 return;
4408 }
4409
4410 /* Remove all pending commands belonging to this request */
4411 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4412 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4413 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4414 __skb_queue_head(&hdev->cmd_q, skb);
4415 break;
4416 }
4417
4418 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4419 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4420 else
4421 *req_complete = bt_cb(skb)->hci.req_complete;
4422 kfree_skb(skb);
4423 }
4424 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4425}
4426
4427static void hci_rx_work(struct work_struct *work)
4428{
4429 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4430 struct sk_buff *skb;
4431
4432 BT_DBG("%s", hdev->name);
4433
4434 while ((skb = skb_dequeue(&hdev->rx_q))) {
4435 /* Send copy to monitor */
4436 hci_send_to_monitor(hdev, skb);
4437
4438 if (atomic_read(&hdev->promisc)) {
4439 /* Send copy to the sockets */
4440 hci_send_to_sock(hdev, skb);
4441 }
4442
4443 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4444 kfree_skb(skb);
4445 continue;
4446 }
4447
4448 if (test_bit(HCI_INIT, &hdev->flags)) {
4449			/* Don't process data packets in this state. */
4450 switch (hci_skb_pkt_type(skb)) {
4451 case HCI_ACLDATA_PKT:
4452 case HCI_SCODATA_PKT:
4453 kfree_skb(skb);
4454 continue;
4455 }
4456 }
4457
4458 /* Process frame */
4459 switch (hci_skb_pkt_type(skb)) {
4460 case HCI_EVENT_PKT:
4461 BT_DBG("%s Event packet", hdev->name);
4462 hci_event_packet(hdev, skb);
4463 break;
4464
4465 case HCI_ACLDATA_PKT:
4466 BT_DBG("%s ACL data packet", hdev->name);
4467 hci_acldata_packet(hdev, skb);
4468 break;
4469
4470 case HCI_SCODATA_PKT:
4471 BT_DBG("%s SCO data packet", hdev->name);
4472 hci_scodata_packet(hdev, skb);
4473 break;
4474
4475 default:
4476 kfree_skb(skb);
4477 break;
4478 }
4479 }
4480}
4481
4482static void hci_cmd_work(struct work_struct *work)
4483{
4484 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4485 struct sk_buff *skb;
4486
4487 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4488 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4489
4490 /* Send queued commands */
4491 if (atomic_read(&hdev->cmd_cnt)) {
4492 skb = skb_dequeue(&hdev->cmd_q);
4493 if (!skb)
4494 return;
4495
4496 kfree_skb(hdev->sent_cmd);
4497
4498 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4499 if (hdev->sent_cmd) {
4500 if (hci_req_status_pend(hdev))
4501 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4502 atomic_dec(&hdev->cmd_cnt);
4503 hci_send_frame(hdev, skb);
4504 if (test_bit(HCI_RESET, &hdev->flags))
4505 cancel_delayed_work(&hdev->cmd_timer);
4506 else
4507 schedule_delayed_work(&hdev->cmd_timer,
4508 HCI_CMD_TIMEOUT);
4509 } else {
4510 skb_queue_head(&hdev->cmd_q, skb);
4511 queue_work(hdev->workqueue, &hdev->cmd_work);
4512 }
4513 }
4514}
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/rfkill.h>
30#include <linux/debugfs.h>
31#include <linux/crypto.h>
32#include <linux/kcov.h>
33#include <linux/property.h>
34#include <linux/suspend.h>
35#include <linux/wait.h>
36#include <linux/unaligned.h>
37
38#include <net/bluetooth/bluetooth.h>
39#include <net/bluetooth/hci_core.h>
40#include <net/bluetooth/l2cap.h>
41#include <net/bluetooth/mgmt.h>
42
43#include "hci_debugfs.h"
44#include "smp.h"
45#include "leds.h"
46#include "msft.h"
47#include "aosp.h"
48#include "hci_codec.h"
49
50static void hci_rx_work(struct work_struct *work);
51static void hci_cmd_work(struct work_struct *work);
52static void hci_tx_work(struct work_struct *work);
53
54/* HCI device list */
55LIST_HEAD(hci_dev_list);
56DEFINE_RWLOCK(hci_dev_list_lock);
57
58/* HCI callback list */
59LIST_HEAD(hci_cb_list);
60
61/* HCI ID Numbering */
62static DEFINE_IDA(hci_index_ida);
63
64/* Get HCI device by index.
65 * Device is held on return. */
66struct hci_dev *hci_dev_get(int index)
67{
68 struct hci_dev *hdev = NULL, *d;
69
70 BT_DBG("%d", index);
71
72 if (index < 0)
73 return NULL;
74
75 read_lock(&hci_dev_list_lock);
76 list_for_each_entry(d, &hci_dev_list, list) {
77 if (d->id == index) {
78 hdev = hci_dev_hold(d);
79 break;
80 }
81 }
82 read_unlock(&hci_dev_list_lock);
83 return hdev;
84}
85
86/* ---- Inquiry support ---- */
87
88bool hci_discovery_active(struct hci_dev *hdev)
89{
90 struct discovery_state *discov = &hdev->discovery;
91
92 switch (discov->state) {
93 case DISCOVERY_FINDING:
94 case DISCOVERY_RESOLVING:
95 return true;
96
97 default:
98 return false;
99 }
100}
101
102void hci_discovery_set_state(struct hci_dev *hdev, int state)
103{
104 int old_state = hdev->discovery.state;
105
106 if (old_state == state)
107 return;
108
109 hdev->discovery.state = state;
110
111 switch (state) {
112 case DISCOVERY_STOPPED:
113 hci_update_passive_scan(hdev);
114
115 if (old_state != DISCOVERY_STARTING)
116 mgmt_discovering(hdev, 0);
117 break;
118 case DISCOVERY_STARTING:
119 break;
120 case DISCOVERY_FINDING:
121 mgmt_discovering(hdev, 1);
122 break;
123 case DISCOVERY_RESOLVING:
124 break;
125 case DISCOVERY_STOPPING:
126 break;
127 }
128
129 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
130}
131
132void hci_inquiry_cache_flush(struct hci_dev *hdev)
133{
134 struct discovery_state *cache = &hdev->discovery;
135 struct inquiry_entry *p, *n;
136
137 list_for_each_entry_safe(p, n, &cache->all, all) {
138 list_del(&p->all);
139 kfree(p);
140 }
141
142 INIT_LIST_HEAD(&cache->unknown);
143 INIT_LIST_HEAD(&cache->resolve);
144}
145
146struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
147 bdaddr_t *bdaddr)
148{
149 struct discovery_state *cache = &hdev->discovery;
150 struct inquiry_entry *e;
151
152 BT_DBG("cache %p, %pMR", cache, bdaddr);
153
154 list_for_each_entry(e, &cache->all, all) {
155 if (!bacmp(&e->data.bdaddr, bdaddr))
156 return e;
157 }
158
159 return NULL;
160}
161
162struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
163 bdaddr_t *bdaddr)
164{
165 struct discovery_state *cache = &hdev->discovery;
166 struct inquiry_entry *e;
167
168 BT_DBG("cache %p, %pMR", cache, bdaddr);
169
170 list_for_each_entry(e, &cache->unknown, list) {
171 if (!bacmp(&e->data.bdaddr, bdaddr))
172 return e;
173 }
174
175 return NULL;
176}
177
178struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
179 bdaddr_t *bdaddr,
180 int state)
181{
182 struct discovery_state *cache = &hdev->discovery;
183 struct inquiry_entry *e;
184
185 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
186
187 list_for_each_entry(e, &cache->resolve, list) {
188 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
189 return e;
190 if (!bacmp(&e->data.bdaddr, bdaddr))
191 return e;
192 }
193
194 return NULL;
195}
196
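/* Re-insert an entry so the resolve list stays ordered by signal
 * strength: a smaller |RSSI| means a stronger signal (-40 dBm sorts
 * ahead of -90 dBm), so name resolution is attempted on the closest
 * devices first. Entries already being resolved (NAME_PENDING) are
 * skipped over and keep their place at the front.
 */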
197void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
198 struct inquiry_entry *ie)
199{
200 struct discovery_state *cache = &hdev->discovery;
201 struct list_head *pos = &cache->resolve;
202 struct inquiry_entry *p;
203
204 list_del(&ie->list);
205
206 list_for_each_entry(p, &cache->resolve, list) {
207 if (p->name_state != NAME_PENDING &&
208 abs(p->data.rssi) >= abs(ie->data.rssi))
209 break;
210 pos = &p->list;
211 }
212
213 list_add(&ie->list, pos);
214}
215
216u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
217 bool name_known)
218{
219 struct discovery_state *cache = &hdev->discovery;
220 struct inquiry_entry *ie;
221 u32 flags = 0;
222
223 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
224
225 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
226
227 if (!data->ssp_mode)
228 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
229
230 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
231 if (ie) {
232 if (!ie->data.ssp_mode)
233 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
234
235 if (ie->name_state == NAME_NEEDED &&
236 data->rssi != ie->data.rssi) {
237 ie->data.rssi = data->rssi;
238 hci_inquiry_cache_update_resolve(hdev, ie);
239 }
240
241 goto update;
242 }
243
244 /* Entry not in the cache. Add new one. */
245 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
246 if (!ie) {
247 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
248 goto done;
249 }
250
251 list_add(&ie->all, &cache->all);
252
253 if (name_known) {
254 ie->name_state = NAME_KNOWN;
255 } else {
256 ie->name_state = NAME_NOT_KNOWN;
257 list_add(&ie->list, &cache->unknown);
258 }
259
260update:
261 if (name_known && ie->name_state != NAME_KNOWN &&
262 ie->name_state != NAME_PENDING) {
263 ie->name_state = NAME_KNOWN;
264 list_del(&ie->list);
265 }
266
267 memcpy(&ie->data, data, sizeof(*data));
268 ie->timestamp = jiffies;
269 cache->timestamp = jiffies;
270
271 if (ie->name_state == NAME_NOT_KNOWN)
272 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
273
274done:
275 return flags;
276}
277
278static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
279{
280 struct discovery_state *cache = &hdev->discovery;
281 struct inquiry_info *info = (struct inquiry_info *) buf;
282 struct inquiry_entry *e;
283 int copied = 0;
284
285 list_for_each_entry(e, &cache->all, all) {
286 struct inquiry_data *data = &e->data;
287
288 if (copied >= num)
289 break;
290
291 bacpy(&info->bdaddr, &data->bdaddr);
292 info->pscan_rep_mode = data->pscan_rep_mode;
293 info->pscan_period_mode = data->pscan_period_mode;
294 info->pscan_mode = data->pscan_mode;
295 memcpy(info->dev_class, data->dev_class, 3);
296 info->clock_offset = data->clock_offset;
297
298 info++;
299 copied++;
300 }
301
302 BT_DBG("cache %p, copied %d", cache, copied);
303 return copied;
304}
305
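/* Handler for the HCIINQUIRY ioctl. Userspace passes a struct
 * hci_inquiry_req immediately followed by room for the responses; a
 * hedged sketch of a caller (error handling omitted, sizes
 * illustrative):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 255 } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *	// on return, buf.ir.num_rsp holds the entry count in buf.info
 */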
306int hci_inquiry(void __user *arg)
307{
308 __u8 __user *ptr = arg;
309 struct hci_inquiry_req ir;
310 struct hci_dev *hdev;
311 int err = 0, do_inquiry = 0, max_rsp;
312 __u8 *buf;
313
314 if (copy_from_user(&ir, ptr, sizeof(ir)))
315 return -EFAULT;
316
317 hdev = hci_dev_get(ir.dev_id);
318 if (!hdev)
319 return -ENODEV;
320
321 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
322 err = -EBUSY;
323 goto done;
324 }
325
326 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
327 err = -EOPNOTSUPP;
328 goto done;
329 }
330
331 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
332 err = -EOPNOTSUPP;
333 goto done;
334 }
335
336 /* Restrict maximum inquiry length to 60 seconds */
337 if (ir.length > 60) {
338 err = -EINVAL;
339 goto done;
340 }
341
342 hci_dev_lock(hdev);
343 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
344 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
345 hci_inquiry_cache_flush(hdev);
346 do_inquiry = 1;
347 }
348 hci_dev_unlock(hdev);
349
350 if (do_inquiry) {
351 hci_req_sync_lock(hdev);
352 err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
353 hci_req_sync_unlock(hdev);
354
355 if (err < 0)
356 goto done;
357
358 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
359 * cleared). If it is interrupted by a signal, return -EINTR.
360 */
361 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
362 TASK_INTERRUPTIBLE)) {
363 err = -EINTR;
364 goto done;
365 }
366 }
367
368	/* For an unlimited number of responses we use a buffer with
369 * 255 entries
370 */
371 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
372
373 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
374	 * copy it to user space.
375 */
376 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
377 if (!buf) {
378 err = -ENOMEM;
379 goto done;
380 }
381
382 hci_dev_lock(hdev);
383 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
384 hci_dev_unlock(hdev);
385
386 BT_DBG("num_rsp %d", ir.num_rsp);
387
388 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
389 ptr += sizeof(ir);
390 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
391 ir.num_rsp))
392 err = -EFAULT;
393 } else
394 err = -EFAULT;
395
396 kfree(buf);
397
398done:
399 hci_dev_put(hdev);
400 return err;
401}
402
403static int hci_dev_do_open(struct hci_dev *hdev)
404{
405 int ret = 0;
406
407 BT_DBG("%s %p", hdev->name, hdev);
408
409 hci_req_sync_lock(hdev);
410
411 ret = hci_dev_open_sync(hdev);
412
413 hci_req_sync_unlock(hdev);
414 return ret;
415}
416
417/* ---- HCI ioctl helpers ---- */
418
419int hci_dev_open(__u16 dev)
420{
421 struct hci_dev *hdev;
422 int err;
423
424 hdev = hci_dev_get(dev);
425 if (!hdev)
426 return -ENODEV;
427
428 /* Devices that are marked as unconfigured can only be powered
429 * up as user channel. Trying to bring them up as normal devices
430 * will result into a failure. Only user channel operation is
431 * possible.
432 *
433 * When this function is called for a user channel, the flag
434 * HCI_USER_CHANNEL will be set first before attempting to
435 * open the device.
436 */
437 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
438 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
439 err = -EOPNOTSUPP;
440 goto done;
441 }
442
443 /* We need to ensure that no other power on/off work is pending
444 * before proceeding to call hci_dev_do_open. This is
445 * particularly important if the setup procedure has not yet
446 * completed.
447 */
448 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
449 cancel_delayed_work(&hdev->power_off);
450
451 /* After this call it is guaranteed that the setup procedure
452 * has finished. This means that error conditions like RFKILL
453 * or no valid public or static random address apply.
454 */
455 flush_workqueue(hdev->req_workqueue);
456
457 /* For controllers not using the management interface and that
458 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
459 * so that pairing works for them. Once the management interface
460 * is in use this bit will be cleared again and userspace has
461 * to explicitly enable it.
462 */
463 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
464 !hci_dev_test_flag(hdev, HCI_MGMT))
465 hci_dev_set_flag(hdev, HCI_BONDABLE);
466
467 err = hci_dev_do_open(hdev);
468
469done:
470 hci_dev_put(hdev);
471 return err;
472}
473
474int hci_dev_do_close(struct hci_dev *hdev)
475{
476 int err;
477
478 BT_DBG("%s %p", hdev->name, hdev);
479
480 hci_req_sync_lock(hdev);
481
482 err = hci_dev_close_sync(hdev);
483
484 hci_req_sync_unlock(hdev);
485
486 return err;
487}
488
489int hci_dev_close(__u16 dev)
490{
491 struct hci_dev *hdev;
492 int err;
493
494 hdev = hci_dev_get(dev);
495 if (!hdev)
496 return -ENODEV;
497
498 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
499 err = -EBUSY;
500 goto done;
501 }
502
503 cancel_work_sync(&hdev->power_on);
504 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
505 cancel_delayed_work(&hdev->power_off);
506
507 err = hci_dev_do_close(hdev);
508
509done:
510 hci_dev_put(hdev);
511 return err;
512}
513
514static int hci_dev_do_reset(struct hci_dev *hdev)
515{
516 int ret;
517
518 BT_DBG("%s %p", hdev->name, hdev);
519
520 hci_req_sync_lock(hdev);
521
522 /* Drop queues */
523 skb_queue_purge(&hdev->rx_q);
524 skb_queue_purge(&hdev->cmd_q);
525
526 /* Cancel these to avoid queueing non-chained pending work */
527 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
528	/* Wait for concurrent RCU readers performing
529	 *
530	 *	if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
531	 *		queue_delayed_work(&hdev->{cmd,ncmd}_timer)
532	 *
533	 * so they either see the flag or finish scheduling first.
534	 */
535 synchronize_rcu();
536 /* Explicitly cancel works in case scheduled after setting the flag. */
537 cancel_delayed_work(&hdev->cmd_timer);
538 cancel_delayed_work(&hdev->ncmd_timer);
539
540 /* Avoid potential lockdep warnings from the *_flush() calls by
541 * ensuring the workqueue is empty up front.
542 */
543 drain_workqueue(hdev->workqueue);
544
545 hci_dev_lock(hdev);
546 hci_inquiry_cache_flush(hdev);
547 hci_conn_hash_flush(hdev);
548 hci_dev_unlock(hdev);
549
550 if (hdev->flush)
551 hdev->flush(hdev);
552
553 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
554
555 atomic_set(&hdev->cmd_cnt, 1);
556 hdev->acl_cnt = 0;
557 hdev->sco_cnt = 0;
558 hdev->le_cnt = 0;
559 hdev->iso_cnt = 0;
560
561 ret = hci_reset_sync(hdev);
562
563 hci_req_sync_unlock(hdev);
564 return ret;
565}
566
567int hci_dev_reset(__u16 dev)
568{
569 struct hci_dev *hdev;
570 int err;
571
572 hdev = hci_dev_get(dev);
573 if (!hdev)
574 return -ENODEV;
575
576 if (!test_bit(HCI_UP, &hdev->flags)) {
577 err = -ENETDOWN;
578 goto done;
579 }
580
581 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
582 err = -EBUSY;
583 goto done;
584 }
585
586 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
587 err = -EOPNOTSUPP;
588 goto done;
589 }
590
591 err = hci_dev_do_reset(hdev);
592
593done:
594 hci_dev_put(hdev);
595 return err;
596}
597
598int hci_dev_reset_stat(__u16 dev)
599{
600 struct hci_dev *hdev;
601 int ret = 0;
602
603 hdev = hci_dev_get(dev);
604 if (!hdev)
605 return -ENODEV;
606
607 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
608 ret = -EBUSY;
609 goto done;
610 }
611
612 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
613 ret = -EOPNOTSUPP;
614 goto done;
615 }
616
617 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
618
619done:
620 hci_dev_put(hdev);
621 return ret;
622}
623
624static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
625{
626 bool conn_changed, discov_changed;
627
628 BT_DBG("%s scan 0x%02x", hdev->name, scan);
629
630 if ((scan & SCAN_PAGE))
631 conn_changed = !hci_dev_test_and_set_flag(hdev,
632 HCI_CONNECTABLE);
633 else
634 conn_changed = hci_dev_test_and_clear_flag(hdev,
635 HCI_CONNECTABLE);
636
637 if ((scan & SCAN_INQUIRY)) {
638 discov_changed = !hci_dev_test_and_set_flag(hdev,
639 HCI_DISCOVERABLE);
640 } else {
641 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
642 discov_changed = hci_dev_test_and_clear_flag(hdev,
643 HCI_DISCOVERABLE);
644 }
645
646 if (!hci_dev_test_flag(hdev, HCI_MGMT))
647 return;
648
649 if (conn_changed || discov_changed) {
650 /* In case this was disabled through mgmt */
651 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
652
653 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
654 hci_update_adv_data(hdev, hdev->cur_adv_instance);
655
656 mgmt_new_settings(hdev);
657 }
658}
659
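/* Legacy device-configuration ioctls. Note the packing used by
 * HCISETACLMTU/HCISETSCOMTU: dev_opt carries two 16-bit values, the
 * packet count in the low half and the MTU in the high half, which is
 * what the pointer arithmetic in those cases unpacks. On a
 * little-endian machine a caller would encode it roughly as:
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 */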
660int hci_dev_cmd(unsigned int cmd, void __user *arg)
661{
662 struct hci_dev *hdev;
663 struct hci_dev_req dr;
664 __le16 policy;
665 int err = 0;
666
667 if (copy_from_user(&dr, arg, sizeof(dr)))
668 return -EFAULT;
669
670 hdev = hci_dev_get(dr.dev_id);
671 if (!hdev)
672 return -ENODEV;
673
674 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
675 err = -EBUSY;
676 goto done;
677 }
678
679 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
680 err = -EOPNOTSUPP;
681 goto done;
682 }
683
684 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
685 err = -EOPNOTSUPP;
686 goto done;
687 }
688
689 switch (cmd) {
690 case HCISETAUTH:
691 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
692 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
693 break;
694
695 case HCISETENCRYPT:
696 if (!lmp_encrypt_capable(hdev)) {
697 err = -EOPNOTSUPP;
698 break;
699 }
700
701 if (!test_bit(HCI_AUTH, &hdev->flags)) {
702 /* Auth must be enabled first */
703 err = hci_cmd_sync_status(hdev,
704 HCI_OP_WRITE_AUTH_ENABLE,
705 1, &dr.dev_opt,
706 HCI_CMD_TIMEOUT);
707 if (err)
708 break;
709 }
710
711 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
712 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
713 break;
714
715 case HCISETSCAN:
716 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
717 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
718
719 /* Ensure that the connectable and discoverable states
720 * get correctly modified as this was a non-mgmt change.
721 */
722 if (!err)
723 hci_update_passive_scan_state(hdev, dr.dev_opt);
724 break;
725
726 case HCISETLINKPOL:
727 policy = cpu_to_le16(dr.dev_opt);
728
729 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
730 2, &policy, HCI_CMD_TIMEOUT);
731 break;
732
733 case HCISETLINKMODE:
734 hdev->link_mode = ((__u16) dr.dev_opt) &
735 (HCI_LM_MASTER | HCI_LM_ACCEPT);
736 break;
737
738 case HCISETPTYPE:
739 if (hdev->pkt_type == (__u16) dr.dev_opt)
740 break;
741
742 hdev->pkt_type = (__u16) dr.dev_opt;
743 mgmt_phy_configuration_changed(hdev, NULL);
744 break;
745
746 case HCISETACLMTU:
747 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
748 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
749 break;
750
751 case HCISETSCOMTU:
752 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
753 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
754 break;
755
756 default:
757 err = -EINVAL;
758 break;
759 }
760
761done:
762 hci_dev_put(hdev);
763 return err;
764}
765
766int hci_get_dev_list(void __user *arg)
767{
768 struct hci_dev *hdev;
769 struct hci_dev_list_req *dl;
770 struct hci_dev_req *dr;
771 int n = 0, err;
772 __u16 dev_num;
773
774 if (get_user(dev_num, (__u16 __user *) arg))
775 return -EFAULT;
776
777 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
778 return -EINVAL;
779
780 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
781 if (!dl)
782 return -ENOMEM;
783
784 dl->dev_num = dev_num;
785 dr = dl->dev_req;
786
787 read_lock(&hci_dev_list_lock);
788 list_for_each_entry(hdev, &hci_dev_list, list) {
789 unsigned long flags = hdev->flags;
790
791		/* When auto-off is configured the transport is still
792		 * running, but the device should nevertheless be
793		 * reported as down.
794		 */
795 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
796 flags &= ~BIT(HCI_UP);
797
798 dr[n].dev_id = hdev->id;
799 dr[n].dev_opt = flags;
800
801 if (++n >= dev_num)
802 break;
803 }
804 read_unlock(&hci_dev_list_lock);
805
806 dl->dev_num = n;
807 err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
808 kfree(dl);
809
810 return err ? -EFAULT : 0;
811}
812
813int hci_get_dev_info(void __user *arg)
814{
815 struct hci_dev *hdev;
816 struct hci_dev_info di;
817 unsigned long flags;
818 int err = 0;
819
820 if (copy_from_user(&di, arg, sizeof(di)))
821 return -EFAULT;
822
823 hdev = hci_dev_get(di.dev_id);
824 if (!hdev)
825 return -ENODEV;
826
827	/* When auto-off is configured the transport is still
828	 * running, but the device should nevertheless be
829	 * reported as down.
830	 */
831 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
832 flags = hdev->flags & ~BIT(HCI_UP);
833 else
834 flags = hdev->flags;
835
836 strscpy(di.name, hdev->name, sizeof(di.name));
837 di.bdaddr = hdev->bdaddr;
838 di.type = (hdev->bus & 0x0f);
839 di.flags = flags;
840 di.pkt_type = hdev->pkt_type;
841 if (lmp_bredr_capable(hdev)) {
842 di.acl_mtu = hdev->acl_mtu;
843 di.acl_pkts = hdev->acl_pkts;
844 di.sco_mtu = hdev->sco_mtu;
845 di.sco_pkts = hdev->sco_pkts;
846 } else {
847 di.acl_mtu = hdev->le_mtu;
848 di.acl_pkts = hdev->le_pkts;
849 di.sco_mtu = 0;
850 di.sco_pkts = 0;
851 }
852 di.link_policy = hdev->link_policy;
853 di.link_mode = hdev->link_mode;
854
855 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
856 memcpy(&di.features, &hdev->features, sizeof(di.features));
857
858 if (copy_to_user(arg, &di, sizeof(di)))
859 err = -EFAULT;
860
861 hci_dev_put(hdev);
862
863 return err;
864}
865
866/* ---- Interface to HCI drivers ---- */
867
868static int hci_dev_do_poweroff(struct hci_dev *hdev)
869{
870 int err;
871
872 BT_DBG("%s %p", hdev->name, hdev);
873
874 hci_req_sync_lock(hdev);
875
876 err = hci_set_powered_sync(hdev, false);
877
878 hci_req_sync_unlock(hdev);
879
880 return err;
881}
882
883static int hci_rfkill_set_block(void *data, bool blocked)
884{
885 struct hci_dev *hdev = data;
886 int err;
887
888 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
889
890 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
891 return -EBUSY;
892
893 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
894 return 0;
895
896 if (blocked) {
897 hci_dev_set_flag(hdev, HCI_RFKILLED);
898
899 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
900 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
901 err = hci_dev_do_poweroff(hdev);
902 if (err) {
903 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
904 err);
905
906 /* Make sure the device is still closed even if
907			 * anything during the power off sequence (e.g.
908 * disconnecting devices) failed.
909 */
910 hci_dev_do_close(hdev);
911 }
912 }
913 } else {
914 hci_dev_clear_flag(hdev, HCI_RFKILLED);
915 }
916
917 return 0;
918}
919
920static const struct rfkill_ops hci_rfkill_ops = {
921 .set_block = hci_rfkill_set_block,
922};
923
924static void hci_power_on(struct work_struct *work)
925{
926 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
927 int err;
928
929 BT_DBG("%s", hdev->name);
930
931 if (test_bit(HCI_UP, &hdev->flags) &&
932 hci_dev_test_flag(hdev, HCI_MGMT) &&
933 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
934 cancel_delayed_work(&hdev->power_off);
935 err = hci_powered_update_sync(hdev);
936 mgmt_power_on(hdev, err);
937 return;
938 }
939
940 err = hci_dev_do_open(hdev);
941 if (err < 0) {
942 hci_dev_lock(hdev);
943 mgmt_set_powered_failed(hdev, err);
944 hci_dev_unlock(hdev);
945 return;
946 }
947
948 /* During the HCI setup phase, a few error conditions are
949 * ignored and they need to be checked now. If they are still
950 * valid, it is important to turn the device back off.
951 */
952 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
953 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
954 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
955 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
956 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
957 hci_dev_do_close(hdev);
958 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
959 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
960 HCI_AUTO_OFF_TIMEOUT);
961 }
962
963 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
964 /* For unconfigured devices, set the HCI_RAW flag
965 * so that userspace can easily identify them.
966 */
967 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
968 set_bit(HCI_RAW, &hdev->flags);
969
970 /* For fully configured devices, this will send
971 * the Index Added event. For unconfigured devices,
972		 * it will send the Unconfigured Index Added event.
973		 *
974		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
975		 * and no event will be sent.
976 */
977 mgmt_index_added(hdev);
978 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
979 /* When the controller is now configured, then it
980 * is important to clear the HCI_RAW flag.
981 */
982 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
983 clear_bit(HCI_RAW, &hdev->flags);
984
985 /* Powering on the controller with HCI_CONFIG set only
986 * happens with the transition from unconfigured to
987 * configured. This will send the Index Added event.
988 */
989 mgmt_index_added(hdev);
990 }
991}
992
993static void hci_power_off(struct work_struct *work)
994{
995 struct hci_dev *hdev = container_of(work, struct hci_dev,
996 power_off.work);
997
998 BT_DBG("%s", hdev->name);
999
1000 hci_dev_do_close(hdev);
1001}
1002
1003static void hci_error_reset(struct work_struct *work)
1004{
1005 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1006
1007 hci_dev_hold(hdev);
1008 BT_DBG("%s", hdev->name);
1009
1010 if (hdev->hw_error)
1011 hdev->hw_error(hdev, hdev->hw_error_code);
1012 else
1013 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1014
1015 if (!hci_dev_do_close(hdev))
1016 hci_dev_do_open(hdev);
1017
1018 hci_dev_put(hdev);
1019}
1020
1021void hci_uuids_clear(struct hci_dev *hdev)
1022{
1023 struct bt_uuid *uuid, *tmp;
1024
1025 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1026 list_del(&uuid->list);
1027 kfree(uuid);
1028 }
1029}
1030
1031void hci_link_keys_clear(struct hci_dev *hdev)
1032{
1033 struct link_key *key, *tmp;
1034
1035 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1036 list_del_rcu(&key->list);
1037 kfree_rcu(key, rcu);
1038 }
1039}
1040
1041void hci_smp_ltks_clear(struct hci_dev *hdev)
1042{
1043 struct smp_ltk *k, *tmp;
1044
1045 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1046 list_del_rcu(&k->list);
1047 kfree_rcu(k, rcu);
1048 }
1049}
1050
1051void hci_smp_irks_clear(struct hci_dev *hdev)
1052{
1053 struct smp_irk *k, *tmp;
1054
1055 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1056 list_del_rcu(&k->list);
1057 kfree_rcu(k, rcu);
1058 }
1059}
1060
1061void hci_blocked_keys_clear(struct hci_dev *hdev)
1062{
1063 struct blocked_key *b, *tmp;
1064
1065 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1066 list_del_rcu(&b->list);
1067 kfree_rcu(b, rcu);
1068 }
1069}
1070
1071bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1072{
1073 bool blocked = false;
1074 struct blocked_key *b;
1075
1076 rcu_read_lock();
1077 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1078 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1079 blocked = true;
1080 break;
1081 }
1082 }
1083
1084 rcu_read_unlock();
1085 return blocked;
1086}
1087
1088struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1089{
1090 struct link_key *k;
1091
1092 rcu_read_lock();
1093 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1094 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1095 rcu_read_unlock();
1096
1097 if (hci_is_blocked_key(hdev,
1098 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1099 k->val)) {
1100 bt_dev_warn_ratelimited(hdev,
1101 "Link key blocked for %pMR",
1102 &k->bdaddr);
1103 return NULL;
1104 }
1105
1106 return k;
1107 }
1108 }
1109 rcu_read_unlock();
1110
1111 return NULL;
1112}
1113
1114static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1115 u8 key_type, u8 old_key_type)
1116{
1117 /* Legacy key */
1118 if (key_type < 0x03)
1119 return true;
1120
1121 /* Debug keys are insecure so don't store them persistently */
1122 if (key_type == HCI_LK_DEBUG_COMBINATION)
1123 return false;
1124
1125 /* Changed combination key and there's no previous one */
1126 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1127 return false;
1128
1129 /* Security mode 3 case */
1130 if (!conn)
1131 return true;
1132
1133 /* BR/EDR key derived using SC from an LE link */
1134 if (conn->type == LE_LINK)
1135 return true;
1136
1137 /* Neither local nor remote side had no-bonding as requirement */
1138 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1139 return true;
1140
1141 /* Local side had dedicated bonding as requirement */
1142 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1143 return true;
1144
1145 /* Remote side had dedicated bonding as requirement */
1146 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1147 return true;
1148
1149 /* If none of the above criteria match, then don't store the key
1150 * persistently */
1151 return false;
1152}
1153
1154static u8 ltk_role(u8 type)
1155{
1156 if (type == SMP_LTK)
1157 return HCI_ROLE_MASTER;
1158
1159 return HCI_ROLE_SLAVE;
1160}
1161
1162struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1163 u8 addr_type, u8 role)
1164{
1165 struct smp_ltk *k;
1166
1167 rcu_read_lock();
1168 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1169 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1170 continue;
1171
1172 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1173 rcu_read_unlock();
1174
1175 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1176 k->val)) {
1177 bt_dev_warn_ratelimited(hdev,
1178 "LTK blocked for %pMR",
1179 &k->bdaddr);
1180 return NULL;
1181 }
1182
1183 return k;
1184 }
1185 }
1186 rcu_read_unlock();
1187
1188 return NULL;
1189}
1190
1191struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1192{
1193 struct smp_irk *irk_to_return = NULL;
1194 struct smp_irk *irk;
1195
1196 rcu_read_lock();
1197 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1198 if (!bacmp(&irk->rpa, rpa)) {
1199 irk_to_return = irk;
1200 goto done;
1201 }
1202 }
1203
1204 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1205 if (smp_irk_matches(hdev, irk->val, rpa)) {
1206 bacpy(&irk->rpa, rpa);
1207 irk_to_return = irk;
1208 goto done;
1209 }
1210 }
1211
1212done:
1213 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1214 irk_to_return->val)) {
1215 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1216 &irk_to_return->bdaddr);
1217 irk_to_return = NULL;
1218 }
1219
1220 rcu_read_unlock();
1221
1222 return irk_to_return;
1223}
1224
1225struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1226 u8 addr_type)
1227{
1228 struct smp_irk *irk_to_return = NULL;
1229 struct smp_irk *irk;
1230
1231 /* Identity Address must be public or static random */
1232 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1233 return NULL;
1234
1235 rcu_read_lock();
1236 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1237 if (addr_type == irk->addr_type &&
1238 bacmp(bdaddr, &irk->bdaddr) == 0) {
1239 irk_to_return = irk;
1240 goto done;
1241 }
1242 }
1243
1244done:
1245
1246 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1247 irk_to_return->val)) {
1248 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1249 &irk_to_return->bdaddr);
1250 irk_to_return = NULL;
1251 }
1252
1253 rcu_read_unlock();
1254
1255 return irk_to_return;
1256}
1257
1258struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1259 bdaddr_t *bdaddr, u8 *val, u8 type,
1260 u8 pin_len, bool *persistent)
1261{
1262 struct link_key *key, *old_key;
1263 u8 old_key_type;
1264
1265 old_key = hci_find_link_key(hdev, bdaddr);
1266 if (old_key) {
1267 old_key_type = old_key->type;
1268 key = old_key;
1269 } else {
1270 old_key_type = conn ? conn->key_type : 0xff;
1271 key = kzalloc(sizeof(*key), GFP_KERNEL);
1272 if (!key)
1273 return NULL;
1274 list_add_rcu(&key->list, &hdev->link_keys);
1275 }
1276
1277 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1278
1279 /* Some buggy controller combinations generate a changed
1280 * combination key for legacy pairing even when there's no
1281 * previous key */
1282 if (type == HCI_LK_CHANGED_COMBINATION &&
1283 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1284 type = HCI_LK_COMBINATION;
1285 if (conn)
1286 conn->key_type = type;
1287 }
1288
1289 bacpy(&key->bdaddr, bdaddr);
1290 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1291 key->pin_len = pin_len;
1292
1293 if (type == HCI_LK_CHANGED_COMBINATION)
1294 key->type = old_key_type;
1295 else
1296 key->type = type;
1297
1298 if (persistent)
1299 *persistent = hci_persistent_key(hdev, conn, type,
1300 old_key_type);
1301
1302 return key;
1303}
1304
1305struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1306 u8 addr_type, u8 type, u8 authenticated,
1307 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1308{
1309 struct smp_ltk *key, *old_key;
1310 u8 role = ltk_role(type);
1311
1312 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1313 if (old_key)
1314 key = old_key;
1315 else {
1316 key = kzalloc(sizeof(*key), GFP_KERNEL);
1317 if (!key)
1318 return NULL;
1319 list_add_rcu(&key->list, &hdev->long_term_keys);
1320 }
1321
1322 bacpy(&key->bdaddr, bdaddr);
1323 key->bdaddr_type = addr_type;
1324 memcpy(key->val, tk, sizeof(key->val));
1325 key->authenticated = authenticated;
1326 key->ediv = ediv;
1327 key->rand = rand;
1328 key->enc_size = enc_size;
1329 key->type = type;
1330
1331 return key;
1332}
1333
1334struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1335 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1336{
1337 struct smp_irk *irk;
1338
1339 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1340 if (!irk) {
1341 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1342 if (!irk)
1343 return NULL;
1344
1345 bacpy(&irk->bdaddr, bdaddr);
1346 irk->addr_type = addr_type;
1347
1348 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1349 }
1350
1351 memcpy(irk->val, val, 16);
1352 bacpy(&irk->rpa, rpa);
1353
1354 return irk;
1355}
1356
1357int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1358{
1359 struct link_key *key;
1360
1361 key = hci_find_link_key(hdev, bdaddr);
1362 if (!key)
1363 return -ENOENT;
1364
1365 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1366
1367 list_del_rcu(&key->list);
1368 kfree_rcu(key, rcu);
1369
1370 return 0;
1371}
1372
1373int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1374{
1375 struct smp_ltk *k, *tmp;
1376 int removed = 0;
1377
1378 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1379 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1380 continue;
1381
1382 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1383
1384 list_del_rcu(&k->list);
1385 kfree_rcu(k, rcu);
1386 removed++;
1387 }
1388
1389 return removed ? 0 : -ENOENT;
1390}
1391
1392void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1393{
1394 struct smp_irk *k, *tmp;
1395
1396 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1397 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1398 continue;
1399
1400 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1401
1402 list_del_rcu(&k->list);
1403 kfree_rcu(k, rcu);
1404 }
1405}
1406
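/* A device pairing with a resolvable private address is stored under
 * its identity address, so this lookup first resolves the given
 * address through the IRK table and, when an IRK matches, searches the
 * LTK list with the identity address and type instead.
 */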
1407bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1408{
1409 struct smp_ltk *k;
1410 struct smp_irk *irk;
1411 u8 addr_type;
1412
1413 if (type == BDADDR_BREDR) {
1414 if (hci_find_link_key(hdev, bdaddr))
1415 return true;
1416 return false;
1417 }
1418
1419 /* Convert to HCI addr type which struct smp_ltk uses */
1420 if (type == BDADDR_LE_PUBLIC)
1421 addr_type = ADDR_LE_DEV_PUBLIC;
1422 else
1423 addr_type = ADDR_LE_DEV_RANDOM;
1424
1425 irk = hci_get_irk(hdev, bdaddr, addr_type);
1426 if (irk) {
1427 bdaddr = &irk->bdaddr;
1428 addr_type = irk->addr_type;
1429 }
1430
1431 rcu_read_lock();
1432 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1433 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1434 rcu_read_unlock();
1435 return true;
1436 }
1437 }
1438 rcu_read_unlock();
1439
1440 return false;
1441}
1442
1443/* HCI command timer function */
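/* Fires when a sent command never produced a completion event. Since
 * command flow control normally allows a single outstanding command at
 * a time, forcing cmd_cnt back to 1 below re-opens that window so the
 * queue is not wedged forever behind the lost command.
 */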
1444static void hci_cmd_timeout(struct work_struct *work)
1445{
1446 struct hci_dev *hdev = container_of(work, struct hci_dev,
1447 cmd_timer.work);
1448
1449 if (hdev->req_skb) {
1450 u16 opcode = hci_skb_opcode(hdev->req_skb);
1451
1452 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1453
1454 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1455 } else {
1456 bt_dev_err(hdev, "command tx timeout");
1457 }
1458
1459 if (hdev->cmd_timeout)
1460 hdev->cmd_timeout(hdev);
1461
1462 atomic_set(&hdev->cmd_cnt, 1);
1463 queue_work(hdev->workqueue, &hdev->cmd_work);
1464}
1465
1466/* HCI ncmd timer function */
1467static void hci_ncmd_timeout(struct work_struct *work)
1468{
1469 struct hci_dev *hdev = container_of(work, struct hci_dev,
1470 ncmd_timer.work);
1471
1472 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1473
1474 /* During HCI_INIT phase no events can be injected if the ncmd timer
1475 * triggers since the procedure has its own timeout handling.
1476 */
1477 if (test_bit(HCI_INIT, &hdev->flags))
1478 return;
1479
1480 /* This is an irrecoverable state, inject hardware error event */
1481 hci_reset_dev(hdev);
1482}
1483
1484struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1485 bdaddr_t *bdaddr, u8 bdaddr_type)
1486{
1487 struct oob_data *data;
1488
1489 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1490 if (bacmp(bdaddr, &data->bdaddr) != 0)
1491 continue;
1492 if (data->bdaddr_type != bdaddr_type)
1493 continue;
1494 return data;
1495 }
1496
1497 return NULL;
1498}
1499
1500int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1501 u8 bdaddr_type)
1502{
1503 struct oob_data *data;
1504
1505 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1506 if (!data)
1507 return -ENOENT;
1508
1509 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1510
1511 list_del(&data->list);
1512 kfree(data);
1513
1514 return 0;
1515}
1516
1517void hci_remote_oob_data_clear(struct hci_dev *hdev)
1518{
1519 struct oob_data *data, *n;
1520
1521 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1522 list_del(&data->list);
1523 kfree(data);
1524 }
1525}
1526
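/* Store remote OOB pairing data. data->present records which key sets
 * were supplied, as consumed by the pairing code:
 *
 *	0x00	neither
 *	0x01	P-192 hash/randomizer only
 *	0x02	P-256 hash/randomizer only
 *	0x03	both P-192 and P-256
 */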
1527int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1528 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1529 u8 *hash256, u8 *rand256)
1530{
1531 struct oob_data *data;
1532
1533 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1534 if (!data) {
1535 data = kmalloc(sizeof(*data), GFP_KERNEL);
1536 if (!data)
1537 return -ENOMEM;
1538
1539 bacpy(&data->bdaddr, bdaddr);
1540 data->bdaddr_type = bdaddr_type;
1541 list_add(&data->list, &hdev->remote_oob_data);
1542 }
1543
1544 if (hash192 && rand192) {
1545 memcpy(data->hash192, hash192, sizeof(data->hash192));
1546 memcpy(data->rand192, rand192, sizeof(data->rand192));
1547 if (hash256 && rand256)
1548 data->present = 0x03;
1549 } else {
1550 memset(data->hash192, 0, sizeof(data->hash192));
1551 memset(data->rand192, 0, sizeof(data->rand192));
1552 if (hash256 && rand256)
1553 data->present = 0x02;
1554 else
1555 data->present = 0x00;
1556 }
1557
1558 if (hash256 && rand256) {
1559 memcpy(data->hash256, hash256, sizeof(data->hash256));
1560 memcpy(data->rand256, rand256, sizeof(data->rand256));
1561 } else {
1562 memset(data->hash256, 0, sizeof(data->hash256));
1563 memset(data->rand256, 0, sizeof(data->rand256));
1564 if (hash192 && rand192)
1565 data->present = 0x01;
1566 }
1567
1568 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1569
1570 return 0;
1571}
1572
1573/* This function requires the caller holds hdev->lock */
1574struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1575{
1576 struct adv_info *adv_instance;
1577
1578 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1579 if (adv_instance->instance == instance)
1580 return adv_instance;
1581 }
1582
1583 return NULL;
1584}
1585
1586/* This function requires the caller holds hdev->lock */
1587struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1588{
1589 struct adv_info *cur_instance;
1590
1591 cur_instance = hci_find_adv_instance(hdev, instance);
1592 if (!cur_instance)
1593 return NULL;
1594
1595 if (cur_instance == list_last_entry(&hdev->adv_instances,
1596 struct adv_info, list))
1597 return list_first_entry(&hdev->adv_instances,
1598 struct adv_info, list);
1599 else
1600 return list_next_entry(cur_instance, list);
1601}
1602
1603/* This function requires the caller holds hdev->lock */
1604int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1605{
1606 struct adv_info *adv_instance;
1607
1608 adv_instance = hci_find_adv_instance(hdev, instance);
1609 if (!adv_instance)
1610 return -ENOENT;
1611
1612	BT_DBG("%s removing instance %d", hdev->name, instance);
1613
1614 if (hdev->cur_adv_instance == instance) {
1615 if (hdev->adv_instance_timeout) {
1616 cancel_delayed_work(&hdev->adv_instance_expire);
1617 hdev->adv_instance_timeout = 0;
1618 }
1619 hdev->cur_adv_instance = 0x00;
1620 }
1621
1622 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1623
1624 list_del(&adv_instance->list);
1625 kfree(adv_instance);
1626
1627 hdev->adv_instance_cnt--;
1628
1629 return 0;
1630}
1631
1632void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1633{
1634 struct adv_info *adv_instance, *n;
1635
1636 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1637 adv_instance->rpa_expired = rpa_expired;
1638}
1639
1640/* This function requires the caller holds hdev->lock */
1641void hci_adv_instances_clear(struct hci_dev *hdev)
1642{
1643 struct adv_info *adv_instance, *n;
1644
1645 if (hdev->adv_instance_timeout) {
1646 disable_delayed_work(&hdev->adv_instance_expire);
1647 hdev->adv_instance_timeout = 0;
1648 }
1649
1650 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1651 disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1652 list_del(&adv_instance->list);
1653 kfree(adv_instance);
1654 }
1655
1656 hdev->adv_instance_cnt = 0;
1657 hdev->cur_adv_instance = 0x00;
1658}
1659
1660static void adv_instance_rpa_expired(struct work_struct *work)
1661{
1662 struct adv_info *adv_instance = container_of(work, struct adv_info,
1663 rpa_expired_cb.work);
1664
1665 BT_DBG("");
1666
1667 adv_instance->rpa_expired = true;
1668}
1669
1670/* This function requires the caller holds hdev->lock */
1671struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1672 u32 flags, u16 adv_data_len, u8 *adv_data,
1673 u16 scan_rsp_len, u8 *scan_rsp_data,
1674 u16 timeout, u16 duration, s8 tx_power,
1675 u32 min_interval, u32 max_interval,
1676 u8 mesh_handle)
1677{
1678 struct adv_info *adv;
1679
1680 adv = hci_find_adv_instance(hdev, instance);
1681 if (adv) {
1682 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1683 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1684 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1685 } else {
1686 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1687 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1688 return ERR_PTR(-EOVERFLOW);
1689
1690 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1691 if (!adv)
1692 return ERR_PTR(-ENOMEM);
1693
1694 adv->pending = true;
1695 adv->instance = instance;
1696
1697		/* If the controller supports only one advertising set and
1698		 * the instance is 1, the only option is to use handle 0x00.
1699		 */
1700 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1701 adv->handle = 0x00;
1702 else
1703 adv->handle = instance;
1704
1705 list_add(&adv->list, &hdev->adv_instances);
1706 hdev->adv_instance_cnt++;
1707 }
1708
1709 adv->flags = flags;
1710 adv->min_interval = min_interval;
1711 adv->max_interval = max_interval;
1712 adv->tx_power = tx_power;
1713 /* Defining a mesh_handle changes the timing units to ms,
1714 * rather than seconds, and ties the instance to the requested
1715 * mesh_tx queue.
1716 */
1717 adv->mesh = mesh_handle;
1718
1719 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1720 scan_rsp_len, scan_rsp_data);
1721
1722 adv->timeout = timeout;
1723 adv->remaining_time = timeout;
1724
1725 if (duration == 0)
1726 adv->duration = hdev->def_multi_adv_rotation_duration;
1727 else
1728 adv->duration = duration;
1729
1730 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1731
1732	BT_DBG("%s for instance %d", hdev->name, instance);
1733
1734 return adv;
1735}
1736
1737/* This function requires the caller holds hdev->lock */
1738struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1739 u32 flags, u8 data_len, u8 *data,
1740 u32 min_interval, u32 max_interval)
1741{
1742 struct adv_info *adv;
1743
1744 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1745 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1746 min_interval, max_interval, 0);
1747 if (IS_ERR(adv))
1748 return adv;
1749
1750 adv->periodic = true;
1751 adv->per_adv_data_len = data_len;
1752
1753 if (data)
1754 memcpy(adv->per_adv_data, data, data_len);
1755
1756 return adv;
1757}
1758
1759/* This function requires the caller holds hdev->lock */
1760int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1761 u16 adv_data_len, u8 *adv_data,
1762 u16 scan_rsp_len, u8 *scan_rsp_data)
1763{
1764 struct adv_info *adv;
1765
1766 adv = hci_find_adv_instance(hdev, instance);
1767
1768 /* If advertisement doesn't exist, we can't modify its data */
1769 if (!adv)
1770 return -ENOENT;
1771
1772 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1773 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1774 memcpy(adv->adv_data, adv_data, adv_data_len);
1775 adv->adv_data_len = adv_data_len;
1776 adv->adv_data_changed = true;
1777 }
1778
1779 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1780 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1781 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1782 adv->scan_rsp_len = scan_rsp_len;
1783 adv->scan_rsp_changed = true;
1784 }
1785
1786 /* Mark as changed if there are flags which would affect it */
1787 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1788 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1789 adv->scan_rsp_changed = true;
1790
1791 return 0;
1792}
1793
1794/* This function requires the caller holds hdev->lock */
1795u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1796{
1797 u32 flags;
1798 struct adv_info *adv;
1799
1800 if (instance == 0x00) {
1801 /* Instance 0 always manages the "Tx Power" and "Flags"
1802 * fields
1803 */
1804 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1805
1806 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1807 * corresponds to the "connectable" instance flag.
1808 */
1809 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1810 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1811
1812 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1813 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1814 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1815 flags |= MGMT_ADV_FLAG_DISCOV;
1816
1817 return flags;
1818 }
1819
1820 adv = hci_find_adv_instance(hdev, instance);
1821
1822 /* Return 0 when given an invalid instance identifier. */
1823 if (!adv)
1824 return 0;
1825
1826 return adv->flags;
1827}
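
/* Worked example: with HCI_ADVERTISING_CONNECTABLE and
 * HCI_LIMITED_DISCOVERABLE both set, hci_adv_instance_flags(hdev, 0x00)
 * returns MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS |
 * MGMT_ADV_FLAG_CONNECTABLE | MGMT_ADV_FLAG_LIMITED_DISCOV.
 */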
1828
1829bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1830{
1831 struct adv_info *adv;
1832
1833 /* Instance 0x00 always sets the local name */
1834 if (instance == 0x00)
1835 return true;
1836
1837 adv = hci_find_adv_instance(hdev, instance);
1838 if (!adv)
1839 return false;
1840
1841 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1842 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1843 return true;
1844
1845 return adv->scan_rsp_len ? true : false;
1846}
1847
1848/* This function requires the caller holds hdev->lock */
1849void hci_adv_monitors_clear(struct hci_dev *hdev)
1850{
1851 struct adv_monitor *monitor;
1852 int handle;
1853
1854 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1855 hci_free_adv_monitor(hdev, monitor);
1856
1857 idr_destroy(&hdev->adv_monitors_idr);
1858}
1859
1860/* Frees the monitor structure and does some bookkeeping.
1861 * This function requires the caller holds hdev->lock.
1862 */
1863void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1864{
1865 struct adv_pattern *pattern;
1866 struct adv_pattern *tmp;
1867
1868 if (!monitor)
1869 return;
1870
1871 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1872 list_del(&pattern->list);
1873 kfree(pattern);
1874 }
1875
1876 if (monitor->handle)
1877 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1878
1879 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1880 hdev->adv_monitors_cnt--;
1881 mgmt_adv_monitor_removed(hdev, monitor->handle);
1882 }
1883
1884 kfree(monitor);
1885}
1886
1887/* Assigns a handle to a monitor and, if offloading is supported and the
1888 * controller is powered, also attempts to forward the request to it.
1889 * This function requires the caller holds hci_req_sync_lock.
1890 */
1891int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1892{
1893 int min, max, handle;
1894 int status = 0;
1895
1896 if (!monitor)
1897 return -EINVAL;
1898
1899 hci_dev_lock(hdev);
1900
1901 min = HCI_MIN_ADV_MONITOR_HANDLE;
1902 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1903 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1904 GFP_KERNEL);
1905
1906 hci_dev_unlock(hdev);
1907
1908 if (handle < 0)
1909 return handle;
1910
1911 monitor->handle = handle;
1912
1913 if (!hdev_is_powered(hdev))
1914 return status;
1915
1916 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1917 case HCI_ADV_MONITOR_EXT_NONE:
1918 bt_dev_dbg(hdev, "add monitor %d status %d",
1919 monitor->handle, status);
1920 /* Message was not forwarded to controller - not an error */
1921 break;
1922
1923 case HCI_ADV_MONITOR_EXT_MSFT:
1924 status = msft_add_monitor_pattern(hdev, monitor);
1925 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1926 handle, status);
1927 break;
1928 }
1929
1930 return status;
1931}
1932
1933/* Attempts to remove the monitor from the controller and then frees it.
1934 * If the controller has no corresponding handle, remove it anyway.
1935 * This function requires the caller holds hci_req_sync_lock.
1936 */
1937static int hci_remove_adv_monitor(struct hci_dev *hdev,
1938 struct adv_monitor *monitor)
1939{
1940 int status = 0;
1941 int handle;
1942
1943 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1944 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1945 bt_dev_dbg(hdev, "remove monitor %d status %d",
1946 monitor->handle, status);
1947 goto free_monitor;
1948
1949 case HCI_ADV_MONITOR_EXT_MSFT:
1950 handle = monitor->handle;
1951 status = msft_remove_monitor(hdev, monitor);
1952 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1953 handle, status);
1954 break;
1955 }
1956
1957 /* If no matching handle is registered, just free the monitor */
1958 if (status == -ENOENT)
1959 goto free_monitor;
1960
1961 return status;
1962
1963free_monitor:
1964 if (status == -ENOENT)
1965 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1966 monitor->handle);
1967 hci_free_adv_monitor(hdev, monitor);
1968
1969 return status;
1970}
1971
1972/* This function requires the caller holds hci_req_sync_lock */
1973int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1974{
1975 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1976
1977 if (!monitor)
1978 return -EINVAL;
1979
1980 return hci_remove_adv_monitor(hdev, monitor);
1981}
1982
1983/* This function requires the caller holds hci_req_sync_lock */
1984int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1985{
1986 struct adv_monitor *monitor;
1987 int idr_next_id = 0;
1988 int status = 0;
1989
1990 while (1) {
1991 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1992 if (!monitor)
1993 break;
1994
1995 status = hci_remove_adv_monitor(hdev, monitor);
1996 if (status)
1997 return status;
1998
1999 idr_next_id++;
2000 }
2001
2002 return status;
2003}
2004
2005/* This function requires the caller holds hdev->lock */
2006bool hci_is_adv_monitoring(struct hci_dev *hdev)
2007{
2008 return !idr_is_empty(&hdev->adv_monitors_idr);
2009}
2010
2011int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2012{
2013 if (msft_monitor_supported(hdev))
2014 return HCI_ADV_MONITOR_EXT_MSFT;
2015
2016 return HCI_ADV_MONITOR_EXT_NONE;
2017}
2018
2019struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2020 bdaddr_t *bdaddr, u8 type)
2021{
2022 struct bdaddr_list *b;
2023
2024 list_for_each_entry(b, bdaddr_list, list) {
2025 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2026 return b;
2027 }
2028
2029 return NULL;
2030}
2031
2032struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2033 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2034 u8 type)
2035{
2036 struct bdaddr_list_with_irk *b;
2037
2038 list_for_each_entry(b, bdaddr_list, list) {
2039 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2040 return b;
2041 }
2042
2043 return NULL;
2044}
2045
2046struct bdaddr_list_with_flags *
2047hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2048 bdaddr_t *bdaddr, u8 type)
2049{
2050 struct bdaddr_list_with_flags *b;
2051
2052 list_for_each_entry(b, bdaddr_list, list) {
2053 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2054 return b;
2055 }
2056
2057 return NULL;
2058}
2059
2060void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2061{
2062 struct bdaddr_list *b, *n;
2063
2064 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2065 list_del(&b->list);
2066 kfree(b);
2067 }
2068}
2069
2070int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2071{
2072 struct bdaddr_list *entry;
2073
2074 if (!bacmp(bdaddr, BDADDR_ANY))
2075 return -EBADF;
2076
2077 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2078 return -EEXIST;
2079
2080 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2081 if (!entry)
2082 return -ENOMEM;
2083
2084 bacpy(&entry->bdaddr, bdaddr);
2085 entry->bdaddr_type = type;
2086
2087 list_add(&entry->list, list);
2088
2089 return 0;
2090}
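
/* Example (sketch; the list head and address are hypothetical): a round
 * trip through the plain bdaddr list API:
 *
 *	LIST_HEAD(my_list);
 *	bdaddr_t addr = { { 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 } };
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&my_list, &addr, BDADDR_BREDR);
 *	if (!err && hci_bdaddr_list_lookup(&my_list, &addr, BDADDR_BREDR))
 *		err = hci_bdaddr_list_del(&my_list, &addr, BDADDR_BREDR);
 *
 * Adding the same address and type twice returns -EEXIST; deleting
 * BDADDR_ANY clears the whole list instead.
 */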
2091
2092int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2093 u8 type, u8 *peer_irk, u8 *local_irk)
2094{
2095 struct bdaddr_list_with_irk *entry;
2096
2097 if (!bacmp(bdaddr, BDADDR_ANY))
2098 return -EBADF;
2099
2100 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2101 return -EEXIST;
2102
2103 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2104 if (!entry)
2105 return -ENOMEM;
2106
2107 bacpy(&entry->bdaddr, bdaddr);
2108 entry->bdaddr_type = type;
2109
2110 if (peer_irk)
2111 memcpy(entry->peer_irk, peer_irk, 16);
2112
2113 if (local_irk)
2114 memcpy(entry->local_irk, local_irk, 16);
2115
2116 list_add(&entry->list, list);
2117
2118 return 0;
2119}
2120
2121int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2122 u8 type, u32 flags)
2123{
2124 struct bdaddr_list_with_flags *entry;
2125
2126 if (!bacmp(bdaddr, BDADDR_ANY))
2127 return -EBADF;
2128
2129 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2130 return -EEXIST;
2131
2132 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2133 if (!entry)
2134 return -ENOMEM;
2135
2136 bacpy(&entry->bdaddr, bdaddr);
2137 entry->bdaddr_type = type;
2138 entry->flags = flags;
2139
2140 list_add(&entry->list, list);
2141
2142 return 0;
2143}
2144
2145int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2146{
2147 struct bdaddr_list *entry;
2148
2149 if (!bacmp(bdaddr, BDADDR_ANY)) {
2150 hci_bdaddr_list_clear(list);
2151 return 0;
2152 }
2153
2154 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2155 if (!entry)
2156 return -ENOENT;
2157
2158 list_del(&entry->list);
2159 kfree(entry);
2160
2161 return 0;
2162}
2163
2164int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2165 u8 type)
2166{
2167 struct bdaddr_list_with_irk *entry;
2168
2169 if (!bacmp(bdaddr, BDADDR_ANY)) {
2170 hci_bdaddr_list_clear(list);
2171 return 0;
2172 }
2173
2174 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2175 if (!entry)
2176 return -ENOENT;
2177
2178 list_del(&entry->list);
2179 kfree(entry);
2180
2181 return 0;
2182}
2183
2184int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2185 u8 type)
2186{
2187 struct bdaddr_list_with_flags *entry;
2188
2189 if (!bacmp(bdaddr, BDADDR_ANY)) {
2190 hci_bdaddr_list_clear(list);
2191 return 0;
2192 }
2193
2194 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2195 if (!entry)
2196 return -ENOENT;
2197
2198 list_del(&entry->list);
2199 kfree(entry);
2200
2201 return 0;
2202}
2203
2204/* This function requires the caller holds hdev->lock */
2205struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2206 bdaddr_t *addr, u8 addr_type)
2207{
2208 struct hci_conn_params *params;
2209
2210 list_for_each_entry(params, &hdev->le_conn_params, list) {
2211 if (bacmp(¶ms->addr, addr) == 0 &&
2212 params->addr_type == addr_type) {
2213 return params;
2214 }
2215 }
2216
2217 return NULL;
2218}
2219
2220/* This function requires the caller holds hdev->lock or rcu_read_lock */
2221struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2222 bdaddr_t *addr, u8 addr_type)
2223{
2224 struct hci_conn_params *param;
2225
2226 rcu_read_lock();
2227
2228 list_for_each_entry_rcu(param, list, action) {
2229 if (bacmp(¶m->addr, addr) == 0 &&
2230 param->addr_type == addr_type) {
2231 rcu_read_unlock();
2232 return param;
2233 }
2234 }
2235
2236 rcu_read_unlock();
2237
2238 return NULL;
2239}
2240
2241/* This function requires the caller holds hdev->lock */
2242void hci_pend_le_list_del_init(struct hci_conn_params *param)
2243{
2244 if (list_empty(¶m->action))
2245 return;
2246
2247 list_del_rcu(¶m->action);
2248 synchronize_rcu();
2249 INIT_LIST_HEAD(¶m->action);
2250}
2251
2252/* This function requires the caller holds hdev->lock */
2253void hci_pend_le_list_add(struct hci_conn_params *param,
2254 struct list_head *list)
2255{
2256 list_add_rcu(¶m->action, list);
2257}
2258
2259/* This function requires the caller holds hdev->lock */
2260struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2261 bdaddr_t *addr, u8 addr_type)
2262{
2263 struct hci_conn_params *params;
2264
2265 params = hci_conn_params_lookup(hdev, addr, addr_type);
2266 if (params)
2267 return params;
2268
2269 params = kzalloc(sizeof(*params), GFP_KERNEL);
2270 if (!params) {
2271 bt_dev_err(hdev, "out of memory");
2272 return NULL;
2273 }
2274
2275 bacpy(¶ms->addr, addr);
2276 params->addr_type = addr_type;
2277
2278 list_add(¶ms->list, &hdev->le_conn_params);
2279 INIT_LIST_HEAD(¶ms->action);
2280
2281 params->conn_min_interval = hdev->le_conn_min_interval;
2282 params->conn_max_interval = hdev->le_conn_max_interval;
2283 params->conn_latency = hdev->le_conn_latency;
2284 params->supervision_timeout = hdev->le_supv_timeout;
2285 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2286
2287 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2288
2289 return params;
2290}
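
/* Example (sketch, caller holds hdev->lock; peer_addr is hypothetical):
 * create or look up params for a peer and enable background autoconnect:
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &peer_addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *
 * The function defaults auto_connect to HCI_AUTO_CONN_DISABLED, so a
 * policy such as HCI_AUTO_CONN_ALWAYS has to be set explicitly.
 */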
2291
2292void hci_conn_params_free(struct hci_conn_params *params)
2293{
2294 hci_pend_le_list_del_init(params);
2295
2296 if (params->conn) {
2297 hci_conn_drop(params->conn);
2298 hci_conn_put(params->conn);
2299 }
2300
2301 list_del(¶ms->list);
2302 kfree(params);
2303}
2304
2305/* This function requires the caller holds hdev->lock */
2306void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2307{
2308 struct hci_conn_params *params;
2309
2310 params = hci_conn_params_lookup(hdev, addr, addr_type);
2311 if (!params)
2312 return;
2313
2314 hci_conn_params_free(params);
2315
2316 hci_update_passive_scan(hdev);
2317
2318 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2319}
2320
2321/* This function requires the caller holds hdev->lock */
2322void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2323{
2324 struct hci_conn_params *params, *tmp;
2325
2326 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2327 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2328 continue;
2329
2330 /* When trying to establish a one-time connection to a disabled
2331 * device, keep the params but mark them for a single attempt.
2332 */
2333 if (params->explicit_connect) {
2334 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2335 continue;
2336 }
2337
2338 hci_conn_params_free(params);
2339 }
2340
2341 BT_DBG("All LE disabled connection parameters were removed");
2342}
2343
2344/* This function requires the caller holds hdev->lock */
2345static void hci_conn_params_clear_all(struct hci_dev *hdev)
2346{
2347 struct hci_conn_params *params, *tmp;
2348
2349 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2350 hci_conn_params_free(params);
2351
2352 BT_DBG("All LE connection parameters were removed");
2353}
2354
2355/* Copy the Identity Address of the controller.
2356 *
2357 * If the controller has a public BD_ADDR, then by default use that one.
2358 * If this is an LE-only controller without a public address, default to
2359 * the static random address.
2360 *
2361 * For debugging purposes it is possible to force controllers with a
2362 * public address to use the static random address instead.
2363 *
2364 * In case BR/EDR has been disabled on a dual-mode controller and
2365 * userspace has configured a static address, then that address
2366 * becomes the identity address instead of the public BR/EDR address.
2367 */
2368void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2369 u8 *bdaddr_type)
2370{
2371 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2372 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2373 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2374 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2375 bacpy(bdaddr, &hdev->static_addr);
2376 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2377 } else {
2378 bacpy(bdaddr, &hdev->bdaddr);
2379 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2380 }
2381}
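
/* For example, a dual-mode controller with a public BD_ADDR and
 * HCI_BREDR_ENABLED set reports its public address with
 * ADDR_LE_DEV_PUBLIC, while an LE-only controller without a public
 * address reports hdev->static_addr with ADDR_LE_DEV_RANDOM:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */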
2382
2383static void hci_clear_wake_reason(struct hci_dev *hdev)
2384{
2385 hci_dev_lock(hdev);
2386
2387 hdev->wake_reason = 0;
2388 bacpy(&hdev->wake_addr, BDADDR_ANY);
2389 hdev->wake_addr_type = 0;
2390
2391 hci_dev_unlock(hdev);
2392}
2393
2394static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2395 void *data)
2396{
2397 struct hci_dev *hdev =
2398 container_of(nb, struct hci_dev, suspend_notifier);
2399 int ret = 0;
2400
2401 /* Userspace has full control of this device. Do nothing. */
2402 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2403 return NOTIFY_DONE;
2404
2405 /* To avoid a potential race with hci_unregister_dev. */
2406 hci_dev_hold(hdev);
2407
2408 switch (action) {
2409 case PM_HIBERNATION_PREPARE:
2410 case PM_SUSPEND_PREPARE:
2411 ret = hci_suspend_dev(hdev);
2412 break;
2413 case PM_POST_HIBERNATION:
2414 case PM_POST_SUSPEND:
2415 ret = hci_resume_dev(hdev);
2416 break;
2417 }
2418
2419 if (ret)
2420 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2421 action, ret);
2422
2423 hci_dev_put(hdev);
2424 return NOTIFY_DONE;
2425}
2426
2427/* Alloc HCI device */
2428struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2429{
2430 struct hci_dev *hdev;
2431 unsigned int alloc_size;
2432
2433 alloc_size = sizeof(*hdev);
2434 if (sizeof_priv) {
2435 /* FIXME: may need alignment? */
2436 alloc_size += sizeof_priv;
2437 }
2438
2439 hdev = kzalloc(alloc_size, GFP_KERNEL);
2440 if (!hdev)
2441 return NULL;
2442
2443 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2444 hdev->esco_type = (ESCO_HV1);
2445 hdev->link_mode = (HCI_LM_ACCEPT);
2446 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2447 hdev->io_capability = 0x03; /* No Input No Output */
2448 hdev->manufacturer = 0xffff; /* Default to internal use */
2449 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2450 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2451 hdev->adv_instance_cnt = 0;
2452 hdev->cur_adv_instance = 0x00;
2453 hdev->adv_instance_timeout = 0;
2454
2455 hdev->advmon_allowlist_duration = 300;
2456 hdev->advmon_no_filter_duration = 500;
2457 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2458
2459 hdev->sniff_max_interval = 800;
2460 hdev->sniff_min_interval = 80;
2461
2462 hdev->le_adv_channel_map = 0x07;
2463 hdev->le_adv_min_interval = 0x0800;
2464 hdev->le_adv_max_interval = 0x0800;
2465 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2466 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2467 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2468 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2469 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2470 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2471 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2472 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2473 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2474 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2475 hdev->le_conn_min_interval = 0x0018;
2476 hdev->le_conn_max_interval = 0x0028;
2477 hdev->le_conn_latency = 0x0000;
2478 hdev->le_supv_timeout = 0x002a;
2479 hdev->le_def_tx_len = 0x001b;
2480 hdev->le_def_tx_time = 0x0148;
2481 hdev->le_max_tx_len = 0x001b;
2482 hdev->le_max_tx_time = 0x0148;
2483 hdev->le_max_rx_len = 0x001b;
2484 hdev->le_max_rx_time = 0x0148;
2485 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2486 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2487 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2488 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2489 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2490 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2491 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2492 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2493 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2494
2495 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2496 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2497 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2498 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2499 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2500 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2501
2502 /* default 1.28 sec page scan */
2503 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2504 hdev->def_page_scan_int = 0x0800;
2505 hdev->def_page_scan_window = 0x0012;
2506
2507 mutex_init(&hdev->lock);
2508 mutex_init(&hdev->req_lock);
2509
2510 ida_init(&hdev->unset_handle_ida);
2511
2512 INIT_LIST_HEAD(&hdev->mesh_pending);
2513 INIT_LIST_HEAD(&hdev->mgmt_pending);
2514 INIT_LIST_HEAD(&hdev->reject_list);
2515 INIT_LIST_HEAD(&hdev->accept_list);
2516 INIT_LIST_HEAD(&hdev->uuids);
2517 INIT_LIST_HEAD(&hdev->link_keys);
2518 INIT_LIST_HEAD(&hdev->long_term_keys);
2519 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2520 INIT_LIST_HEAD(&hdev->remote_oob_data);
2521 INIT_LIST_HEAD(&hdev->le_accept_list);
2522 INIT_LIST_HEAD(&hdev->le_resolv_list);
2523 INIT_LIST_HEAD(&hdev->le_conn_params);
2524 INIT_LIST_HEAD(&hdev->pend_le_conns);
2525 INIT_LIST_HEAD(&hdev->pend_le_reports);
2526 INIT_LIST_HEAD(&hdev->conn_hash.list);
2527 INIT_LIST_HEAD(&hdev->adv_instances);
2528 INIT_LIST_HEAD(&hdev->blocked_keys);
2529 INIT_LIST_HEAD(&hdev->monitored_devices);
2530
2531 INIT_LIST_HEAD(&hdev->local_codecs);
2532 INIT_WORK(&hdev->rx_work, hci_rx_work);
2533 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2534 INIT_WORK(&hdev->tx_work, hci_tx_work);
2535 INIT_WORK(&hdev->power_on, hci_power_on);
2536 INIT_WORK(&hdev->error_reset, hci_error_reset);
2537
2538 hci_cmd_sync_init(hdev);
2539
2540 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2541
2542 skb_queue_head_init(&hdev->rx_q);
2543 skb_queue_head_init(&hdev->cmd_q);
2544 skb_queue_head_init(&hdev->raw_q);
2545
2546 init_waitqueue_head(&hdev->req_wait_q);
2547
2548 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2549 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2550
2551 hci_devcd_setup(hdev);
2552
2553 hci_init_sysfs(hdev);
2554 discovery_init(hdev);
2555
2556 return hdev;
2557}
2558EXPORT_SYMBOL(hci_alloc_dev_priv);
2559
2560/* Free HCI device */
2561void hci_free_dev(struct hci_dev *hdev)
2562{
2563 /* will free via device release */
2564 put_device(&hdev->dev);
2565}
2566EXPORT_SYMBOL(hci_free_dev);
2567
2568/* Register HCI device */
2569int hci_register_dev(struct hci_dev *hdev)
2570{
2571 int id, error;
2572
2573 if (!hdev->open || !hdev->close || !hdev->send)
2574 return -EINVAL;
2575
2576 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2577 if (id < 0)
2578 return id;
2579
2580 error = dev_set_name(&hdev->dev, "hci%u", id);
2581 if (error)
2582 return error;
2583
2584 hdev->name = dev_name(&hdev->dev);
2585 hdev->id = id;
2586
2587 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2588
2589 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2590 if (!hdev->workqueue) {
2591 error = -ENOMEM;
2592 goto err;
2593 }
2594
2595 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2596 hdev->name);
2597 if (!hdev->req_workqueue) {
2598 destroy_workqueue(hdev->workqueue);
2599 error = -ENOMEM;
2600 goto err;
2601 }
2602
2603 if (!IS_ERR_OR_NULL(bt_debugfs))
2604 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2605
2606 error = device_add(&hdev->dev);
2607 if (error < 0)
2608 goto err_wqueue;
2609
2610 hci_leds_init(hdev);
2611
2612 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2613 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2614 hdev);
2615 if (hdev->rfkill) {
2616 if (rfkill_register(hdev->rfkill) < 0) {
2617 rfkill_destroy(hdev->rfkill);
2618 hdev->rfkill = NULL;
2619 }
2620 }
2621
2622 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2623 hci_dev_set_flag(hdev, HCI_RFKILLED);
2624
2625 hci_dev_set_flag(hdev, HCI_SETUP);
2626 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2627
2628 /* Assume BR/EDR support until proven otherwise (such as
2629 * through reading supported features during init).
2630 */
2631 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2632
2633 write_lock(&hci_dev_list_lock);
2634 list_add(&hdev->list, &hci_dev_list);
2635 write_unlock(&hci_dev_list_lock);
2636
2637 /* Devices that are marked for raw-only usage are unconfigured
2638 * and should not be included in normal operation.
2639 */
2640 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2641 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2642
2643 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2644 * callback.
2645 */
2646 if (hdev->wakeup)
2647 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2648
2649 hci_sock_dev_event(hdev, HCI_DEV_REG);
2650 hci_dev_hold(hdev);
2651
2652 error = hci_register_suspend_notifier(hdev);
2653 if (error)
2654 BT_WARN("register suspend notifier failed error: %d", error);
2655
2656 queue_work(hdev->req_workqueue, &hdev->power_on);
2657
2658 idr_init(&hdev->adv_monitors_idr);
2659 msft_register(hdev);
2660
2661 return id;
2662
2663err_wqueue:
2664 debugfs_remove_recursive(hdev->debugfs);
2665 destroy_workqueue(hdev->workqueue);
2666 destroy_workqueue(hdev->req_workqueue);
2667err:
2668 ida_free(&hci_index_ida, hdev->id);
2669
2670 return error;
2671}
2672EXPORT_SYMBOL(hci_register_dev);
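
/* Example (a minimal sketch of a driver probe path; my_open, my_close and
 * my_send are hypothetical driver callbacks): allocate a device, fill in
 * the mandatory operations and register it:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev_priv(0);
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_VIRTUAL;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On success hci_register_dev() returns the allocated hci index.
 */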
2673
2674/* Unregister HCI device */
2675void hci_unregister_dev(struct hci_dev *hdev)
2676{
2677 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2678
2679 mutex_lock(&hdev->unregister_lock);
2680 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2681 mutex_unlock(&hdev->unregister_lock);
2682
2683 write_lock(&hci_dev_list_lock);
2684 list_del(&hdev->list);
2685 write_unlock(&hci_dev_list_lock);
2686
2687 disable_work_sync(&hdev->rx_work);
2688 disable_work_sync(&hdev->cmd_work);
2689 disable_work_sync(&hdev->tx_work);
2690 disable_work_sync(&hdev->power_on);
2691 disable_work_sync(&hdev->error_reset);
2692
2693 hci_cmd_sync_clear(hdev);
2694
2695 hci_unregister_suspend_notifier(hdev);
2696
2697 hci_dev_do_close(hdev);
2698
2699 if (!test_bit(HCI_INIT, &hdev->flags) &&
2700 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2701 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2702 hci_dev_lock(hdev);
2703 mgmt_index_removed(hdev);
2704 hci_dev_unlock(hdev);
2705 }
2706
2707 /* mgmt_index_removed should take care of emptying the
2708 * pending list */
2709 BUG_ON(!list_empty(&hdev->mgmt_pending));
2710
2711 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2712
2713 if (hdev->rfkill) {
2714 rfkill_unregister(hdev->rfkill);
2715 rfkill_destroy(hdev->rfkill);
2716 }
2717
2718 device_del(&hdev->dev);
2719 /* Actual cleanup is deferred until hci_release_dev(). */
2720 hci_dev_put(hdev);
2721}
2722EXPORT_SYMBOL(hci_unregister_dev);
2723
2724/* Release HCI device */
2725void hci_release_dev(struct hci_dev *hdev)
2726{
2727 debugfs_remove_recursive(hdev->debugfs);
2728 kfree_const(hdev->hw_info);
2729 kfree_const(hdev->fw_info);
2730
2731 destroy_workqueue(hdev->workqueue);
2732 destroy_workqueue(hdev->req_workqueue);
2733
2734 hci_dev_lock(hdev);
2735 hci_bdaddr_list_clear(&hdev->reject_list);
2736 hci_bdaddr_list_clear(&hdev->accept_list);
2737 hci_uuids_clear(hdev);
2738 hci_link_keys_clear(hdev);
2739 hci_smp_ltks_clear(hdev);
2740 hci_smp_irks_clear(hdev);
2741 hci_remote_oob_data_clear(hdev);
2742 hci_adv_instances_clear(hdev);
2743 hci_adv_monitors_clear(hdev);
2744 hci_bdaddr_list_clear(&hdev->le_accept_list);
2745 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2746 hci_conn_params_clear_all(hdev);
2747 hci_discovery_filter_clear(hdev);
2748 hci_blocked_keys_clear(hdev);
2749 hci_codec_list_clear(&hdev->local_codecs);
2750 msft_release(hdev);
2751 hci_dev_unlock(hdev);
2752
2753 ida_destroy(&hdev->unset_handle_ida);
2754 ida_free(&hci_index_ida, hdev->id);
2755 kfree_skb(hdev->sent_cmd);
2756 kfree_skb(hdev->req_skb);
2757 kfree_skb(hdev->recv_event);
2758 kfree(hdev);
2759}
2760EXPORT_SYMBOL(hci_release_dev);
2761
2762int hci_register_suspend_notifier(struct hci_dev *hdev)
2763{
2764 int ret = 0;
2765
2766 if (!hdev->suspend_notifier.notifier_call &&
2767 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2768 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2769 ret = register_pm_notifier(&hdev->suspend_notifier);
2770 }
2771
2772 return ret;
2773}
2774
2775int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2776{
2777 int ret = 0;
2778
2779 if (hdev->suspend_notifier.notifier_call) {
2780 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2781 if (!ret)
2782 hdev->suspend_notifier.notifier_call = NULL;
2783 }
2784
2785 return ret;
2786}
2787
2788/* Cancel ongoing command synchronously:
2789 *
2790 * - Cancel command timer
2791 * - Reset command counter
2792 * - Cancel command request
2793 */
2794static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2795{
2796 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2797
2798 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2799 disable_delayed_work_sync(&hdev->cmd_timer);
2800 disable_delayed_work_sync(&hdev->ncmd_timer);
2801 } else {
2802 cancel_delayed_work_sync(&hdev->cmd_timer);
2803 cancel_delayed_work_sync(&hdev->ncmd_timer);
2804 }
2805
2806 atomic_set(&hdev->cmd_cnt, 1);
2807
2808 hci_cmd_sync_cancel_sync(hdev, err);
2809}
2810
2811/* Suspend HCI device */
2812int hci_suspend_dev(struct hci_dev *hdev)
2813{
2814 int ret;
2815
2816 bt_dev_dbg(hdev, "");
2817
2818 /* Suspend should only act when powered. */
2819 if (!hdev_is_powered(hdev) ||
2820 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2821 return 0;
2822
2823 /* If powering down don't attempt to suspend */
2824 if (mgmt_powering_down(hdev))
2825 return 0;
2826
2827 /* Cancel potentially blocking sync operation before suspend */
2828 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2829
2830 hci_req_sync_lock(hdev);
2831 ret = hci_suspend_sync(hdev);
2832 hci_req_sync_unlock(hdev);
2833
2834 hci_clear_wake_reason(hdev);
2835 mgmt_suspending(hdev, hdev->suspend_state);
2836
2837 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2838 return ret;
2839}
2840EXPORT_SYMBOL(hci_suspend_dev);
2841
2842/* Resume HCI device */
2843int hci_resume_dev(struct hci_dev *hdev)
2844{
2845 int ret;
2846
2847 bt_dev_dbg(hdev, "");
2848
2849 /* Resume should only act when powered. */
2850 if (!hdev_is_powered(hdev) ||
2851 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2852 return 0;
2853
2854 /* If powering down don't attempt to resume */
2855 if (mgmt_powering_down(hdev))
2856 return 0;
2857
2858 hci_req_sync_lock(hdev);
2859 ret = hci_resume_sync(hdev);
2860 hci_req_sync_unlock(hdev);
2861
2862 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2863 hdev->wake_addr_type);
2864
2865 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2866 return ret;
2867}
2868EXPORT_SYMBOL(hci_resume_dev);
2869
2870/* Reset HCI device */
2871int hci_reset_dev(struct hci_dev *hdev)
2872{
2873 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2874 struct sk_buff *skb;
2875
2876 skb = bt_skb_alloc(3, GFP_ATOMIC);
2877 if (!skb)
2878 return -ENOMEM;
2879
2880 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2881 skb_put_data(skb, hw_err, 3);
2882
2883 bt_dev_err(hdev, "Injecting HCI hardware error event");
2884
2885 /* Send Hardware Error to upper stack */
2886 return hci_recv_frame(hdev, skb);
2887}
2888EXPORT_SYMBOL(hci_reset_dev);
2889
2890static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2891{
2892 if (hdev->classify_pkt_type)
2893 return hdev->classify_pkt_type(hdev, skb);
2894
2895 return hci_skb_pkt_type(skb);
2896}
2897
2898/* Receive frame from HCI drivers */
2899int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2900{
2901 u8 dev_pkt_type;
2902
2903 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2904 && !test_bit(HCI_INIT, &hdev->flags))) {
2905 kfree_skb(skb);
2906 return -ENXIO;
2907 }
2908
2909 /* Check if the driver agrees with the packet type classification */
2910 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2911 if (hci_skb_pkt_type(skb) != dev_pkt_type)
2912 hci_skb_pkt_type(skb) = dev_pkt_type;
2914
2915 switch (hci_skb_pkt_type(skb)) {
2916 case HCI_EVENT_PKT:
2917 break;
2918 case HCI_ACLDATA_PKT:
2919 /* Detect if ISO packet has been sent as ACL */
2920 if (hci_conn_num(hdev, ISO_LINK)) {
2921 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2922 __u8 type;
2923
2924 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2925 if (type == ISO_LINK)
2926 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2927 }
2928 break;
2929 case HCI_SCODATA_PKT:
2930 break;
2931 case HCI_ISODATA_PKT:
2932 break;
2933 default:
2934 kfree_skb(skb);
2935 return -EINVAL;
2936 }
2937
2938 /* Incoming skb */
2939 bt_cb(skb)->incoming = 1;
2940
2941 /* Time stamp */
2942 __net_timestamp(skb);
2943
2944 skb_queue_tail(&hdev->rx_q, skb);
2945 queue_work(hdev->workqueue, &hdev->rx_work);
2946
2947 return 0;
2948}
2949EXPORT_SYMBOL(hci_recv_frame);
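
/* Example (sketch; buf and len come from a hypothetical transport): a
 * driver delivering a received HCI event to the core sets the packet type
 * before calling hci_recv_frame():
 *
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *
 *	return hci_recv_frame(hdev, skb);
 *
 * hci_recv_frame() consumes the skb even on error, so the caller must not
 * free it again.
 */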
2950
2951/* Receive diagnostic message from HCI drivers */
2952int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2953{
2954 /* Mark as diagnostic packet */
2955 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2956
2957 /* Time stamp */
2958 __net_timestamp(skb);
2959
2960 skb_queue_tail(&hdev->rx_q, skb);
2961 queue_work(hdev->workqueue, &hdev->rx_work);
2962
2963 return 0;
2964}
2965EXPORT_SYMBOL(hci_recv_diag);
2966
2967void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2968{
2969 va_list vargs;
2970
2971 va_start(vargs, fmt);
2972 kfree_const(hdev->hw_info);
2973 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2974 va_end(vargs);
2975}
2976EXPORT_SYMBOL(hci_set_hw_info);
2977
2978void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2979{
2980 va_list vargs;
2981
2982 va_start(vargs, fmt);
2983 kfree_const(hdev->fw_info);
2984 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2985 va_end(vargs);
2986}
2987EXPORT_SYMBOL(hci_set_fw_info);
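
/* Example (sketch; the revision values are hypothetical): drivers usually
 * record hardware and firmware details during setup():
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 */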
2988
2989/* ---- Interface to upper protocols ---- */
2990
2991int hci_register_cb(struct hci_cb *cb)
2992{
2993 BT_DBG("%p name %s", cb, cb->name);
2994
2995 list_add_tail_rcu(&cb->list, &hci_cb_list);
2996
2997 return 0;
2998}
2999EXPORT_SYMBOL(hci_register_cb);
3000
3001int hci_unregister_cb(struct hci_cb *cb)
3002{
3003 BT_DBG("%p name %s", cb, cb->name);
3004
3005 list_del_rcu(&cb->list);
3006 synchronize_rcu();
3007
3008 return 0;
3009}
3010EXPORT_SYMBOL(hci_unregister_cb);
3011
3012static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3013{
3014 int err;
3015
3016 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3017 skb->len);
3018
3019 /* Time stamp */
3020 __net_timestamp(skb);
3021
3022 /* Send copy to monitor */
3023 hci_send_to_monitor(hdev, skb);
3024
3025 if (atomic_read(&hdev->promisc)) {
3026 /* Send copy to the sockets */
3027 hci_send_to_sock(hdev, skb);
3028 }
3029
3030 /* Get rid of skb owner, prior to sending to the driver. */
3031 skb_orphan(skb);
3032
3033 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3034 kfree_skb(skb);
3035 return -EINVAL;
3036 }
3037
3038 err = hdev->send(hdev, skb);
3039 if (err < 0) {
3040 bt_dev_err(hdev, "sending frame failed (%d)", err);
3041 kfree_skb(skb);
3042 return err;
3043 }
3044
3045 return 0;
3046}
3047
3048/* Send HCI command */
3049int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3050 const void *param)
3051{
3052 struct sk_buff *skb;
3053
3054 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3055
3056 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3057 if (!skb) {
3058 bt_dev_err(hdev, "no memory for command");
3059 return -ENOMEM;
3060 }
3061
3062 /* Stand-alone HCI commands must be flagged as
3063 * single-command requests.
3064 */
3065 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3066
3067 skb_queue_tail(&hdev->cmd_q, skb);
3068 queue_work(hdev->workqueue, &hdev->cmd_work);
3069
3070 return 0;
3071}
3072
3073int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3074 const void *param)
3075{
3076 struct sk_buff *skb;
3077
3078 if (hci_opcode_ogf(opcode) != 0x3f) {
3079 /* A controller receiving a command shall respond with either
3080 * a Command Status Event or a Command Complete Event.
3081 * Therefore, all standard HCI commands must be sent via the
3082 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3083 * Some vendors do not comply with this rule for vendor-specific
3084 * commands and do not return any event. We want to support
3085 * unresponded commands for such cases only.
3086 */
3087 bt_dev_err(hdev, "unresponded command not supported");
3088 return -EINVAL;
3089 }
3090
3091 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3092 if (!skb) {
3093 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3094 opcode);
3095 return -ENOMEM;
3096 }
3097
3098 hci_send_frame(hdev, skb);
3099
3100 return 0;
3101}
3102EXPORT_SYMBOL(__hci_cmd_send);
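
/* Example (sketch; opcode 0xfc01 and its parameter are hypothetical):
 * __hci_cmd_send() only accepts vendor-specific opcodes (OGF 0x3f, i.e.
 * the 0xfc00-0xffff range), for commands the controller never answers:
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	err = __hci_cmd_send(hdev, 0xfc01, sizeof(param), param);
 */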
3103
3104/* Get data from the previously sent command */
3105static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3106{
3107 struct hci_command_hdr *hdr;
3108
3109 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3110 return NULL;
3111
3112 hdr = (void *)skb->data;
3113
3114 if (hdr->opcode != cpu_to_le16(opcode))
3115 return NULL;
3116
3117 return skb->data + HCI_COMMAND_HDR_SIZE;
3118}
3119
3120/* Get data from the previously sent command */
3121void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3122{
3123 void *data;
3124
3125 /* Check if opcode matches last sent command */
3126 data = hci_cmd_data(hdev->sent_cmd, opcode);
3127 if (!data)
3128 /* Check if opcode matches last request */
3129 data = hci_cmd_data(hdev->req_skb, opcode);
3130
3131 return data;
3132}
3133
3134/* Get data from last received event */
3135void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3136{
3137 struct hci_event_hdr *hdr;
3138 int offset;
3139
3140 if (!hdev->recv_event)
3141 return NULL;
3142
3143 hdr = (void *)hdev->recv_event->data;
3144 offset = sizeof(*hdr);
3145
3146 if (hdr->evt != event) {
3147 /* In case of an LE meta event, check whether the subevent matches */
3148 if (hdr->evt == HCI_EV_LE_META) {
3149 struct hci_ev_le_meta *ev;
3150
3151 ev = (void *)hdev->recv_event->data + offset;
3152 offset += sizeof(*ev);
3153 if (ev->subevent == event)
3154 goto found;
3155 }
3156 return NULL;
3157 }
3158
3159found:
3160 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3161
3162 return hdev->recv_event->data + offset;
3163}
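
/* Example (sketch): the payload of the last received event can be looked
 * up by event code, or by subevent code for LE meta events; the caller
 * must cast to the structure matching the event it asked for:
 *
 *	struct hci_ev_le_conn_complete *ev;
 *
 *	ev = hci_recv_event_data(hdev, HCI_EV_LE_CONN_COMPLETE);
 *	if (ev)
 *		bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
 */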
3164
3165/* Send ACL data */
3166static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3167{
3168 struct hci_acl_hdr *hdr;
3169 int len = skb->len;
3170
3171 skb_push(skb, HCI_ACL_HDR_SIZE);
3172 skb_reset_transport_header(skb);
3173 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3174 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3175 hdr->dlen = cpu_to_le16(len);
3176}
3177
3178static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3179 struct sk_buff *skb, __u16 flags)
3180{
3181 struct hci_conn *conn = chan->conn;
3182 struct hci_dev *hdev = conn->hdev;
3183 struct sk_buff *list;
3184
3185 skb->len = skb_headlen(skb);
3186 skb->data_len = 0;
3187
3188 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3189
3190 hci_add_acl_hdr(skb, conn->handle, flags);
3191
3192 list = skb_shinfo(skb)->frag_list;
3193 if (!list) {
3194 /* Non fragmented */
3195 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3196
3197 skb_queue_tail(queue, skb);
3198 } else {
3199 /* Fragmented */
3200 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3201
3202 skb_shinfo(skb)->frag_list = NULL;
3203
3204 /* Queue all fragments atomically. We need to use spin_lock_bh
3205 * here because of 6LoWPAN links, as there this function is
3206 * called from softirq and using normal spin lock could cause
3207 * deadlocks.
3208 */
3209 spin_lock_bh(&queue->lock);
3210
3211 __skb_queue_tail(queue, skb);
3212
3213 flags &= ~ACL_START;
3214 flags |= ACL_CONT;
3215 do {
3216 skb = list; list = list->next;
3217
3218 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3219 hci_add_acl_hdr(skb, conn->handle, flags);
3220
3221 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3222
3223 __skb_queue_tail(queue, skb);
3224 } while (list);
3225
3226 spin_unlock_bh(&queue->lock);
3227 }
3228}
3229
3230void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3231{
3232 struct hci_dev *hdev = chan->conn->hdev;
3233
3234 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3235
3236 hci_queue_acl(chan, &chan->data_q, skb, flags);
3237
3238 queue_work(hdev->workqueue, &hdev->tx_work);
3239}
3240
3241/* Send SCO data */
3242void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3243{
3244 struct hci_dev *hdev = conn->hdev;
3245 struct hci_sco_hdr hdr;
3246
3247 BT_DBG("%s len %d", hdev->name, skb->len);
3248
3249 hdr.handle = cpu_to_le16(conn->handle);
3250 hdr.dlen = skb->len;
3251
3252 skb_push(skb, HCI_SCO_HDR_SIZE);
3253 skb_reset_transport_header(skb);
3254 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3255
3256 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3257
3258 skb_queue_tail(&conn->data_q, skb);
3259 queue_work(hdev->workqueue, &hdev->tx_work);
3260}
3261
3262/* Send ISO data */
3263static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3264{
3265 struct hci_iso_hdr *hdr;
3266 int len = skb->len;
3267
3268 skb_push(skb, HCI_ISO_HDR_SIZE);
3269 skb_reset_transport_header(skb);
3270 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3271 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3272 hdr->dlen = cpu_to_le16(len);
3273}
3274
3275static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3276 struct sk_buff *skb)
3277{
3278 struct hci_dev *hdev = conn->hdev;
3279 struct sk_buff *list;
3280 __u16 flags;
3281
3282 skb->len = skb_headlen(skb);
3283 skb->data_len = 0;
3284
3285 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3286
3287 list = skb_shinfo(skb)->frag_list;
3288
3289 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3290 hci_add_iso_hdr(skb, conn->handle, flags);
3291
3292 if (!list) {
3293 /* Non fragmented */
3294 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3295
3296 skb_queue_tail(queue, skb);
3297 } else {
3298 /* Fragmented */
3299 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3300
3301 skb_shinfo(skb)->frag_list = NULL;
3302
3303 __skb_queue_tail(queue, skb);
3304
3305 do {
3306 skb = list; list = list->next;
3307
3308 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3309 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3310 0x00);
3311 hci_add_iso_hdr(skb, conn->handle, flags);
3312
3313 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3314
3315 __skb_queue_tail(queue, skb);
3316 } while (list);
3317 }
3318}
3319
3320void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3321{
3322 struct hci_dev *hdev = conn->hdev;
3323
3324 BT_DBG("%s len %d", hdev->name, skb->len);
3325
3326 hci_queue_iso(conn, &conn->data_q, skb);
3327
3328 queue_work(hdev->workqueue, &hdev->tx_work);
3329}
3330
3331/* ---- HCI TX task (outgoing data) ---- */
3332
3333/* HCI Connection scheduler */
3334static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3335{
3336 struct hci_dev *hdev;
3337 int cnt, q;
3338
3339 if (!conn) {
3340 *quote = 0;
3341 return;
3342 }
3343
3344 hdev = conn->hdev;
3345
3346 switch (conn->type) {
3347 case ACL_LINK:
3348 cnt = hdev->acl_cnt;
3349 break;
3350 case SCO_LINK:
3351 case ESCO_LINK:
3352 cnt = hdev->sco_cnt;
3353 break;
3354 case LE_LINK:
3355 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3356 break;
3357 case ISO_LINK:
3358 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3359 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3360 break;
3361 default:
3362 cnt = 0;
3363 bt_dev_err(hdev, "unknown link type %d", conn->type);
3364 }
3365
3366 q = cnt / num;
3367 *quote = q ? q : 1;
3368}
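
/* Worked example: with 8 free ACL buffers (cnt == 8) shared between 3
 * active ACL connections (num == 3), each connection is given a quote of
 * 8 / 3 = 2 packets per scheduling round; when cnt / num rounds down to 0
 * the quote is clamped to 1 so a connection is never completely starved.
 */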
3369
3370static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3371 int *quote)
3372{
3373 struct hci_conn_hash *h = &hdev->conn_hash;
3374 struct hci_conn *conn = NULL, *c;
3375 unsigned int num = 0, min = ~0;
3376
3377 /* We don't have to lock the device here. Connections are always
3378 * added and removed with the TX task disabled. */
3379
3380 rcu_read_lock();
3381
3382 list_for_each_entry_rcu(c, &h->list, list) {
3383 if (c->type != type || skb_queue_empty(&c->data_q))
3384 continue;
3385
3386 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3387 continue;
3388
3389 num++;
3390
3391 if (c->sent < min) {
3392 min = c->sent;
3393 conn = c;
3394 }
3395
3396 if (hci_conn_num(hdev, type) == num)
3397 break;
3398 }
3399
3400 rcu_read_unlock();
3401
3402 hci_quote_sent(conn, num, quote);
3403
3404 BT_DBG("conn %p quote %d", conn, *quote);
3405 return conn;
3406}
3407
3408static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3409{
3410 struct hci_conn_hash *h = &hdev->conn_hash;
3411 struct hci_conn *c;
3412
3413 bt_dev_err(hdev, "link tx timeout");
3414
3415 rcu_read_lock();
3416
3417 /* Kill stalled connections */
3418 list_for_each_entry_rcu(c, &h->list, list) {
3419 if (c->type == type && c->sent) {
3420 bt_dev_err(hdev, "killing stalled connection %pMR",
3421 &c->dst);
3422 /* hci_disconnect might sleep, so, we have to release
3423 * the RCU read lock before calling it.
3424 */
3425 rcu_read_unlock();
3426 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3427 rcu_read_lock();
3428 }
3429 }
3430
3431 rcu_read_unlock();
3432}
3433
3434static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3435 int *quote)
3436{
3437 struct hci_conn_hash *h = &hdev->conn_hash;
3438 struct hci_chan *chan = NULL;
3439 unsigned int num = 0, min = ~0, cur_prio = 0;
3440 struct hci_conn *conn;
3441 int conn_num = 0;
3442
3443 BT_DBG("%s", hdev->name);
3444
3445 rcu_read_lock();
3446
3447 list_for_each_entry_rcu(conn, &h->list, list) {
3448 struct hci_chan *tmp;
3449
3450 if (conn->type != type)
3451 continue;
3452
3453 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3454 continue;
3455
3456 conn_num++;
3457
3458 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3459 struct sk_buff *skb;
3460
3461 if (skb_queue_empty(&tmp->data_q))
3462 continue;
3463
3464 skb = skb_peek(&tmp->data_q);
3465 if (skb->priority < cur_prio)
3466 continue;
3467
3468 if (skb->priority > cur_prio) {
3469 num = 0;
3470 min = ~0;
3471 cur_prio = skb->priority;
3472 }
3473
3474 num++;
3475
3476 if (conn->sent < min) {
3477 min = conn->sent;
3478 chan = tmp;
3479 }
3480 }
3481
3482 if (hci_conn_num(hdev, type) == conn_num)
3483 break;
3484 }
3485
3486 rcu_read_unlock();
3487
3488 if (!chan)
3489 return NULL;
3490
3491 hci_quote_sent(chan->conn, num, quote);
3492
3493 BT_DBG("chan %p quote %d", chan, *quote);
3494 return chan;
3495}
3496
3497static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3498{
3499 struct hci_conn_hash *h = &hdev->conn_hash;
3500 struct hci_conn *conn;
3501 int num = 0;
3502
3503 BT_DBG("%s", hdev->name);
3504
3505 rcu_read_lock();
3506
3507 list_for_each_entry_rcu(conn, &h->list, list) {
3508 struct hci_chan *chan;
3509
3510 if (conn->type != type)
3511 continue;
3512
3513 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3514 continue;
3515
3516 num++;
3517
3518 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3519 struct sk_buff *skb;
3520
3521 if (chan->sent) {
3522 chan->sent = 0;
3523 continue;
3524 }
3525
3526 if (skb_queue_empty(&chan->data_q))
3527 continue;
3528
3529 skb = skb_peek(&chan->data_q);
3530 if (skb->priority >= HCI_PRIO_MAX - 1)
3531 continue;
3532
3533 skb->priority = HCI_PRIO_MAX - 1;
3534
3535 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3536 skb->priority);
3537 }
3538
3539 if (hci_conn_num(hdev, type) == num)
3540 break;
3541 }
3542
3543 rcu_read_unlock();
3544
3545}
3546
3547static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3548{
3549 unsigned long last_tx;
3550
3551 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3552 return;
3553
3554 switch (type) {
3555 case LE_LINK:
3556 last_tx = hdev->le_last_tx;
3557 break;
3558 default:
3559 last_tx = hdev->acl_last_tx;
3560 break;
3561 }
3562
3563 /* tx timeout must be longer than maximum link supervision timeout
3564 * (40.9 seconds)
3565 */
3566 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3567 hci_link_tx_to(hdev, type);
3568}
3569
3570/* Schedule SCO */
3571static void hci_sched_sco(struct hci_dev *hdev)
3572{
3573 struct hci_conn *conn;
3574 struct sk_buff *skb;
3575 int quote;
3576
3577 BT_DBG("%s", hdev->name);
3578
3579 if (!hci_conn_num(hdev, SCO_LINK))
3580 return;
3581
3582 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3583 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3584 BT_DBG("skb %p len %d", skb, skb->len);
3585 hci_send_frame(hdev, skb);
3586
3587 conn->sent++;
3588 if (conn->sent == ~0)
3589 conn->sent = 0;
3590 }
3591 }
3592}
3593
3594static void hci_sched_esco(struct hci_dev *hdev)
3595{
3596 struct hci_conn *conn;
3597 struct sk_buff *skb;
3598 int quote;
3599
3600 BT_DBG("%s", hdev->name);
3601
3602 if (!hci_conn_num(hdev, ESCO_LINK))
3603 return;
3604
3605 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3606 "e))) {
3607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3608 BT_DBG("skb %p len %d", skb, skb->len);
3609 hci_send_frame(hdev, skb);
3610
3611 conn->sent++;
3612 if (conn->sent == ~0)
3613 conn->sent = 0;
3614 }
3615 }
3616}
3617
3618static void hci_sched_acl_pkt(struct hci_dev *hdev)
3619{
3620 unsigned int cnt = hdev->acl_cnt;
3621 struct hci_chan *chan;
3622 struct sk_buff *skb;
3623 int quote;
3624
3625 __check_timeout(hdev, cnt, ACL_LINK);
3626
3627 while (hdev->acl_cnt &&
3628 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3629 u32 priority = (skb_peek(&chan->data_q))->priority;
3630 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3631 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3632 skb->len, skb->priority);
3633
3634 /* Stop if priority has changed */
3635 if (skb->priority < priority)
3636 break;
3637
3638 skb = skb_dequeue(&chan->data_q);
3639
3640 hci_conn_enter_active_mode(chan->conn,
3641 bt_cb(skb)->force_active);
3642
3643 hci_send_frame(hdev, skb);
3644 hdev->acl_last_tx = jiffies;
3645
3646 hdev->acl_cnt--;
3647 chan->sent++;
3648 chan->conn->sent++;
3649
3650 /* Send pending SCO packets right away */
3651 hci_sched_sco(hdev);
3652 hci_sched_esco(hdev);
3653 }
3654 }
3655
3656 if (cnt != hdev->acl_cnt)
3657 hci_prio_recalculate(hdev, ACL_LINK);
3658}
3659
3660static void hci_sched_acl(struct hci_dev *hdev)
3661{
3662 BT_DBG("%s", hdev->name);
3663
3664 /* No ACL link over BR/EDR controller */
3665 if (!hci_conn_num(hdev, ACL_LINK))
3666 return;
3667
3668 hci_sched_acl_pkt(hdev);
3669}
3670
3671static void hci_sched_le(struct hci_dev *hdev)
3672{
3673 struct hci_chan *chan;
3674 struct sk_buff *skb;
3675 int quote, *cnt, tmp;
3676
3677 BT_DBG("%s", hdev->name);
3678
3679 if (!hci_conn_num(hdev, LE_LINK))
3680 return;
3681
3682 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3683
3684 __check_timeout(hdev, *cnt, LE_LINK);
3685
3686 tmp = *cnt;
3687 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3688 u32 priority = (skb_peek(&chan->data_q))->priority;
3689 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3690 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3691 skb->len, skb->priority);
3692
3693 /* Stop if priority has changed */
3694 if (skb->priority < priority)
3695 break;
3696
3697 skb = skb_dequeue(&chan->data_q);
3698
3699 hci_send_frame(hdev, skb);
3700 hdev->le_last_tx = jiffies;
3701
3702 (*cnt)--;
3703 chan->sent++;
3704 chan->conn->sent++;
3705
3706 /* Send pending SCO packets right away */
3707 hci_sched_sco(hdev);
3708 hci_sched_esco(hdev);
3709 }
3710 }
3711
3712 if (*cnt != tmp)
3713 hci_prio_recalculate(hdev, LE_LINK);
3714}
3715
3716/* Schedule CIS */
3717static void hci_sched_iso(struct hci_dev *hdev)
3718{
3719 struct hci_conn *conn;
3720 struct sk_buff *skb;
3721 int quote, *cnt;
3722
3723 BT_DBG("%s", hdev->name);
3724
3725 if (!hci_conn_num(hdev, ISO_LINK))
3726 return;
3727
3728 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3729 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3730 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) {
3731 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3732 BT_DBG("skb %p len %d", skb, skb->len);
3733 hci_send_frame(hdev, skb);
3734
3735 conn->sent++;
3736 if (conn->sent == ~0)
3737 conn->sent = 0;
3738 (*cnt)--;
3739 }
3740 }
3741}
3742
3743static void hci_tx_work(struct work_struct *work)
3744{
3745 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3746 struct sk_buff *skb;
3747
3748 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3749 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3750
3751 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3752 /* Schedule queues and send stuff to HCI driver */
3753 hci_sched_sco(hdev);
3754 hci_sched_esco(hdev);
3755 hci_sched_iso(hdev);
3756 hci_sched_acl(hdev);
3757 hci_sched_le(hdev);
3758 }
3759
3760 /* Send next queued raw (unknown type) packet */
3761 while ((skb = skb_dequeue(&hdev->raw_q)))
3762 hci_send_frame(hdev, skb);
3763}
3764
3765/* ----- HCI RX task (incoming data processing) ----- */
3766
3767/* ACL data packet */
3768static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3769{
3770 struct hci_acl_hdr *hdr;
3771 struct hci_conn *conn;
3772 __u16 handle, flags;
3773
3774 hdr = skb_pull_data(skb, sizeof(*hdr));
3775 if (!hdr) {
3776 bt_dev_err(hdev, "ACL packet too small");
3777 goto drop;
3778 }
3779
3780 handle = __le16_to_cpu(hdr->handle);
3781 flags = hci_flags(handle);
3782 handle = hci_handle(handle);
3783
3784 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3785 handle, flags);
3786
3787 hdev->stat.acl_rx++;
3788
3789 hci_dev_lock(hdev);
3790 conn = hci_conn_hash_lookup_handle(hdev, handle);
3791 hci_dev_unlock(hdev);
3792
3793 if (conn) {
3794 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3795
3796 /* Send to upper protocol */
3797 l2cap_recv_acldata(conn, skb, flags);
3798 return;
3799 } else {
3800 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3801 handle);
3802 }
3803
3804drop:
3805 kfree_skb(skb);
3806}
3807
3808/* SCO data packet */
3809static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3810{
3811 struct hci_sco_hdr *hdr;
3812 struct hci_conn *conn;
3813 __u16 handle, flags;
3814
3815 hdr = skb_pull_data(skb, sizeof(*hdr));
3816 if (!hdr) {
3817 bt_dev_err(hdev, "SCO packet too small");
3818 goto drop;
3819 }
3820
3821 handle = __le16_to_cpu(hdr->handle);
3822 flags = hci_flags(handle);
3823 handle = hci_handle(handle);
3824
3825 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3826 handle, flags);
3827
3828 hdev->stat.sco_rx++;
3829
3830 hci_dev_lock(hdev);
3831 conn = hci_conn_hash_lookup_handle(hdev, handle);
3832 hci_dev_unlock(hdev);
3833
3834 if (conn) {
3835 /* Send to upper protocol */
3836 hci_skb_pkt_status(skb) = flags & 0x03;
3837 sco_recv_scodata(conn, skb);
3838 return;
3839 } else {
3840 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3841 handle);
3842 }
3843
3844drop:
3845 kfree_skb(skb);
3846}
3847
3848static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3849{
3850 struct hci_iso_hdr *hdr;
3851 struct hci_conn *conn;
3852 __u16 handle, flags;
3853
3854 hdr = skb_pull_data(skb, sizeof(*hdr));
3855 if (!hdr) {
3856 bt_dev_err(hdev, "ISO packet too small");
3857 goto drop;
3858 }
3859
3860 handle = __le16_to_cpu(hdr->handle);
3861 flags = hci_flags(handle);
3862 handle = hci_handle(handle);
3863
3864 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3865 handle, flags);
3866
3867 hci_dev_lock(hdev);
3868 conn = hci_conn_hash_lookup_handle(hdev, handle);
3869 hci_dev_unlock(hdev);
3870
3871 if (!conn) {
3872 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3873 handle);
3874 goto drop;
3875 }
3876
3877 /* Send to upper protocol */
3878 iso_recv(conn, skb, flags);
3879 return;
3880
3881drop:
3882 kfree_skb(skb);
3883}
3884
3885static bool hci_req_is_complete(struct hci_dev *hdev)
3886{
3887 struct sk_buff *skb;
3888
3889 skb = skb_peek(&hdev->cmd_q);
3890 if (!skb)
3891 return true;
3892
3893 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3894}
3895
3896static void hci_resend_last(struct hci_dev *hdev)
3897{
3898 struct hci_command_hdr *sent;
3899 struct sk_buff *skb;
3900 u16 opcode;
3901
3902 if (!hdev->sent_cmd)
3903 return;
3904
3905 sent = (void *) hdev->sent_cmd->data;
3906 opcode = __le16_to_cpu(sent->opcode);
3907 if (opcode == HCI_OP_RESET)
3908 return;
3909
3910 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3911 if (!skb)
3912 return;
3913
3914 skb_queue_head(&hdev->cmd_q, skb);
3915 queue_work(hdev->workqueue, &hdev->cmd_work);
3916}
3917
3918void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3919 hci_req_complete_t *req_complete,
3920 hci_req_complete_skb_t *req_complete_skb)
3921{
3922 struct sk_buff *skb;
3923 unsigned long flags;
3924
3925 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3926
3927 /* If the completed command doesn't match the last one that was
3928 * sent, we need to do special handling of it.
3929 */
3930 if (!hci_sent_cmd_data(hdev, opcode)) {
3931 /* Some CSR based controllers generate a spontaneous
3932 * reset complete event during init and any pending
3933 * command will never be completed. In such a case we
3934 * need to resend whatever was the last sent
3935 * command.
3936 */
3937 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3938 hci_resend_last(hdev);
3939
3940 return;
3941 }
3942
3943 /* If we reach this point this event matches the last command sent */
3944 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3945
3946 /* If the command succeeded and there's still more commands in
3947 * this request the request is not yet complete.
3948 */
3949 if (!status && !hci_req_is_complete(hdev))
3950 return;
3951
3952 skb = hdev->req_skb;
3953
3954 /* If this was the last command in a request the complete
3955 * callback would be found in hdev->req_skb instead of the
3956 * command queue (hdev->cmd_q).
3957 */
3958 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3959 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3960 return;
3961 }
3962
3963 if (skb && bt_cb(skb)->hci.req_complete) {
3964 *req_complete = bt_cb(skb)->hci.req_complete;
3965 return;
3966 }
3967
3968 /* Remove all pending commands belonging to this request */
3969 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3970 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3971 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3972 __skb_queue_head(&hdev->cmd_q, skb);
3973 break;
3974 }
3975
3976 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3977 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3978 else
3979 *req_complete = bt_cb(skb)->hci.req_complete;
3980 dev_kfree_skb_irq(skb);
3981 }
3982 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3983}
3984
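/* Illustrative sketch (hypothetical caller, modelled on the event
 * handlers in hci_event.c): hci_req_cmd_complete() only *reports* the
 * completion callbacks through its out-parameters; invoking them with
 * the event skb is left to the caller.
 */
static void __maybe_unused cmd_complete_caller_sketch(struct hci_dev *hdev,
                                                      u16 opcode, u8 status,
                                                      struct sk_buff *skb)
{
        hci_req_complete_t req_complete = NULL;
        hci_req_complete_skb_t req_complete_skb = NULL;

        hci_req_cmd_complete(hdev, opcode, status, &req_complete,
                             &req_complete_skb);

        if (req_complete_skb)
                req_complete_skb(hdev, status, opcode, skb);
        else if (req_complete)
                req_complete(hdev, status, opcode);
}
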
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        /* The kcov_remote functions are used to collect packet parsing
         * coverage from this background thread and associate it with
         * the thread of the syscall that originally injected the
         * packet. This helps with fuzzing the kernel.
         */
        for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
                kcov_remote_start_common(skb_get_kcov_handle(skb));

                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                /* If the device has been opened in HCI_USER_CHANNEL,
                 * userspace has exclusive access to the device.
                 * While the device is in HCI_INIT, data packets still
                 * need to be processed so the driver can complete its
                 * setup().
                 */
                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    !test_bit(HCI_INIT, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (hci_skb_pkt_type(skb)) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                        case HCI_ISODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (hci_skb_pkt_type(skb)) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                case HCI_ISODATA_PKT:
                        BT_DBG("%s ISO data packet", hdev->name);
                        hci_isodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

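/* Illustrative sketch (hypothetical driver, assuming the exported
 * hci_recv_frame() API): a transport driver tags each received buffer
 * with its HCI packet type and hands it to the core, which queues it
 * on hdev->rx_q and schedules hci_rx_work() above to demultiplex it.
 */
static int __maybe_unused driver_rx_sketch(struct hci_dev *hdev,
                                           struct sk_buff *skb, u8 pkt_type)
{
        hci_skb_pkt_type(skb) = pkt_type;       /* e.g. HCI_EVENT_PKT */
        return hci_recv_frame(hdev, skb);
}
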
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
        int err;

        bt_dev_dbg(hdev, "skb %p", skb);

        kfree_skb(hdev->sent_cmd);

        hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
        if (!hdev->sent_cmd) {
                skb_queue_head(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
                return;
        }

        err = hci_send_frame(hdev, skb);
        if (err < 0) {
                hci_cmd_sync_cancel_sync(hdev, -err);
                return;
        }

        if (hdev->req_status == HCI_REQ_PEND &&
            !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
                kfree_skb(hdev->req_skb);
                hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        }

        atomic_dec(&hdev->cmd_cnt);
}

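/* Illustrative sketch (hypothetical helper, modelled on the Command
 * Complete/Command Status handlers in hci_event.c): cmd_cnt is the
 * controller's command-credit counter. It is decremented above for
 * every frame sent and replenished when the controller's next event
 * reports a non-zero Num_HCI_Command_Packets.
 */
static void __maybe_unused replenish_cmd_credit_sketch(struct hci_dev *hdev,
                                                       u8 ncmd)
{
        if (ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                if (!skb_queue_empty(&hdev->cmd_q))
                        queue_work(hdev->workqueue, &hdev->cmd_work);
        }
}
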
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                hci_send_cmd_sync(hdev, skb);

                rcu_read_lock();
                if (test_bit(HCI_RESET, &hdev->flags) ||
                    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
                        cancel_delayed_work(&hdev->cmd_timer);
                else
                        queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
                                           HCI_CMD_TIMEOUT);
                rcu_read_unlock();
        }
}
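
/* Illustrative sketch (hypothetical producer): callers never write to
 * the transport directly; they queue a fully formed command skb on
 * hdev->cmd_q and kick cmd_work, which is the single consumer that
 * respects the controller's command credits via the path above.
 */
static void __maybe_unused submit_cmd_sketch(struct hci_dev *hdev,
                                             struct sk_buff *cmd)
{
        skb_queue_tail(&hdev->cmd_q, cmd);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}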