/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
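
/* Internal status values for a pending synchronous request, tracked in
 * hdev->req_status: a request moves from PEND to DONE when the matching
 * command completes, or to CANCELED when hci_req_sync_cancel() aborts
 * the wait.
 */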
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
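
/* Illustrative sketch (not part of this file): a typical caller builds
 * and runs an asynchronous request roughly like this:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, complete_cb);
 *
 * hci_req_add() only queues commands locally; nothing reaches the
 * controller until req_run() splices the queue onto hdev->cmd_q and
 * kicks hdev->cmd_work.
 */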

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
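
/* Example (illustrative; the opcode is just chosen for the sketch):
 * read the controller's buffer sizes synchronously, waiting up to
 * HCI_INIT_TIMEOUT for the Command Complete event:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 *
 * The returned skb carries the command's return parameters and must be
 * freed by the caller.
 */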

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
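
/* The resulting command packet layout is simply:
 *
 *	+---------------+------+---------------------+
 *	| opcode (le16) | plen | plen bytes of param |
 *	+---------------+------+---------------------+
 *
 * hci_skb_pkt_type() is what lets drivers that use H:4 framing prepend
 * the 0x01 command packet indicator before this header.
 */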

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
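
/* Note on units: page scan interval and window are expressed in 0.625 ms
 * baseband slots, so 0x0100 = 160 ms, 0x0800 = 1.28 s and the 0x0012
 * window above = 11.25 ms.
 */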

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * take care of setting proper values for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
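
/* The three helpers above all emit EIR structures of the form
 * { length, type, data... }, growing the length octet in place as
 * UUIDs are appended and downgrading the type from _ALL to _SOME
 * when the buffer cannot hold the complete list.
 */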

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
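
/* The value returned above is the scanning filter policy from the LE
 * Set Scan Parameters command: 0x00 accepts all advertising packets,
 * 0x01 only those from devices on the white list. The extended
 * policies 0x02/0x03 (see hci_req_add_le_passive_scan() below)
 * additionally pass directed advertising sent to an RPA.
 */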

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if both the extended scan parameters and
	 * extended scan enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
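
/* Like the page scan parameters, scan interval and window are in
 * 0.625 ms slots; e.g. the core's default le_scan_interval of 0x0060
 * corresponds to 60 ms.
 */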

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses and
	 * thus has LE privacy enabled, controllers that support the
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the
		 * moment.
		 */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			    &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
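
/* The le_states bytes checked above come from the controller's
 * supported LE states mask: state bit N is le_states[N / 8] masked
 * with 1 << (N % 8), e.g. bit 20 -> le_states[2] & 0x10.
 */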

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In the extended adv set param command the interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address needs to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
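
/* The { 0x00, 0x08, 0x00 } interval used above is little-endian
 * 0x000800, i.e. 2048 * 0.625 ms = 1.28 s, matching the legacy
 * advertising default.
 */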

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->duration * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
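
/* Summary of the own-address decision made above:
 *
 *	use_rpa          -> random address, (re)generated RPA
 *	require_privacy  -> random address, fresh NRPA
 *	static address   -> random address, hdev->static_addr
 *	otherwise        -> public address, hdev->bdaddr
 */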
1976
1977static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1978{
1979 struct bdaddr_list *b;
1980
1981 list_for_each_entry(b, &hdev->whitelist, list) {
1982 struct hci_conn *conn;
1983
1984 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1985 if (!conn)
1986 return true;
1987
1988 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1989 return true;
1990 }
1991
1992 return false;
1993}
1994
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

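/* Queue a Write Class of Device command whenever the combination of
 * minor class, major class and service classes differs from what the
 * controller currently uses. Setting 0x20 in the middle octet flags
 * bit 13 of the Class of Device, which advertises Limited Discoverable
 * Mode.
 */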
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

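/* Select the Inquiry Access Codes the controller responds to. The
 * LAPs are encoded in little-endian order: 0x00 0x8b 0x9e is the LIAC
 * (0x9e8b00) and 0x33 0x8b 0x9e is the GIAC (0x9e8b33). Limited
 * discoverable mode uses both codes, general discoverable mode only
 * the GIAC.
 */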
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

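/* Queue the HCI commands needed to abort the given connection based
 * on its current state: disconnect established links, cancel pending
 * connection attempts and reject incoming connection requests.
 */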
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe, hard-code one of the
			 * values that is suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

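/* Flush the inquiry cache and start a BR/EDR inquiry using either the
 * limited (LIAC) or the general (GIAC) inquiry access code, depending
 * on whether limited discovery was requested. The opt argument
 * carries the inquiry length.
 */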
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

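/* Work callback that disables LE scanning once the discovery timeout
 * has elapsed and then decides how to proceed: stop discovery for
 * LE-only scans, or hand over to (or wait for) BR/EDR inquiry when
 * discovery is interleaved.
 */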
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery;
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If a remote device name is being resolved, do not change the
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

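/* Queue the commands to restart LE scanning, i.e. disable and then
 * re-enable the scan. On controllers with a strict duplicate filter
 * this resets the filter, so that devices which were already reported
 * once can be reported again.
 */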
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run after 'duration' from scan_start. During the scan restart
	 * this work has been canceled, so we need to queue it again with
	 * the proper timeout to make sure that the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		/* Account for a possible jiffies wraparound between
		 * scan_start and now.
		 */
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

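/* Start an active LE scan for device discovery. Ongoing advertising
 * and background scanning are stopped first, since the discovery scan
 * needs its own scan parameters and uses a private (resolvable or
 * non-resolvable) own address.
 */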
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type, 0);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

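/* Queue the commands needed to stop an ongoing discovery: cancel the
 * inquiry, disable LE scanning and cancel a pending remote name
 * request. Returns true if at least one HCI command was queued.
 */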
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that
	 * the limited discoverable flag is cleared. Even in the case of
	 * a timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

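/* Bring the controller settings in sync with the host settings after
 * the controller has been powered on: SSP and Secure Connections
 * support, the LE host feature bits, default advertising and scan
 * response data, link security, and the BR/EDR scan, class, name and
 * EIR settings.
 */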
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

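/* Initialize the deferred work items used by the request helpers in
 * this file; hci_request_cancel_all() below is the matching teardown
 * helper that cancels all of them.
 */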
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
24#include <asm/unaligned.h>
25
26#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
28#include <net/bluetooth/mgmt.h>
29
30#include "smp.h"
31#include "hci_request.h"
32
33#define HCI_REQ_DONE 0
34#define HCI_REQ_PEND 1
35#define HCI_REQ_CANCELED 2
36
37void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
44static int req_run(struct hci_request *req, hci_req_complete_t complete,
45 hci_req_complete_skb_t complete_skb)
46{
47 struct hci_dev *hdev = req->hdev;
48 struct sk_buff *skb;
49 unsigned long flags;
50
51 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
52
53 /* If an error occurred during request building, remove all HCI
54 * commands queued on the HCI request queue.
55 */
56 if (req->err) {
57 skb_queue_purge(&req->cmd_q);
58 return req->err;
59 }
60
61 /* Do not allow empty requests */
62 if (skb_queue_empty(&req->cmd_q))
63 return -ENODATA;
64
65 skb = skb_peek_tail(&req->cmd_q);
66 if (complete) {
67 bt_cb(skb)->hci.req_complete = complete;
68 } else if (complete_skb) {
69 bt_cb(skb)->hci.req_complete_skb = complete_skb;
70 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
71 }
72
73 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
74 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
75 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
76
77 queue_work(hdev->workqueue, &hdev->cmd_work);
78
79 return 0;
80}
81
82int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
83{
84 return req_run(req, complete, NULL);
85}
86
87int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
88{
89 return req_run(req, NULL, complete);
90}
91
92static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
93 struct sk_buff *skb)
94{
95 BT_DBG("%s result 0x%2.2x", hdev->name, result);
96
97 if (hdev->req_status == HCI_REQ_PEND) {
98 hdev->req_result = result;
99 hdev->req_status = HCI_REQ_DONE;
100 if (skb)
101 hdev->req_skb = skb_get(skb);
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
106void hci_req_sync_cancel(struct hci_dev *hdev, int err)
107{
108 BT_DBG("%s err 0x%2.2x", hdev->name, err);
109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = err;
112 hdev->req_status = HCI_REQ_CANCELED;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
118 const void *param, u8 event, u32 timeout)
119{
120 DECLARE_WAITQUEUE(wait, current);
121 struct hci_request req;
122 struct sk_buff *skb;
123 int err = 0;
124
125 BT_DBG("%s", hdev->name);
126
127 hci_req_init(&req, hdev);
128
129 hci_req_add_ev(&req, opcode, plen, param, event);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 err = hci_req_run_skb(&req, hci_req_sync_complete);
137 if (err < 0) {
138 remove_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_RUNNING);
140 return ERR_PTR(err);
141 }
142
143 schedule_timeout(timeout);
144
145 remove_wait_queue(&hdev->req_wait_q, &wait);
146
147 if (signal_pending(current))
148 return ERR_PTR(-EINTR);
149
150 switch (hdev->req_status) {
151 case HCI_REQ_DONE:
152 err = -bt_to_errno(hdev->req_result);
153 break;
154
155 case HCI_REQ_CANCELED:
156 err = -hdev->req_result;
157 break;
158
159 default:
160 err = -ETIMEDOUT;
161 break;
162 }
163
164 hdev->req_status = hdev->req_result = 0;
165 skb = hdev->req_skb;
166 hdev->req_skb = NULL;
167
168 BT_DBG("%s end: err %d", hdev->name, err);
169
170 if (err < 0) {
171 kfree_skb(skb);
172 return ERR_PTR(err);
173 }
174
175 if (!skb)
176 return ERR_PTR(-ENODATA);
177
178 return skb;
179}
180EXPORT_SYMBOL(__hci_cmd_sync_ev);
181
182struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
183 const void *param, u32 timeout)
184{
185 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
186}
187EXPORT_SYMBOL(__hci_cmd_sync);
188
189/* Execute request and wait for completion. */
190int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
191 unsigned long opt),
192 unsigned long opt, u32 timeout, u8 *hci_status)
193{
194 struct hci_request req;
195 DECLARE_WAITQUEUE(wait, current);
196 int err = 0;
197
198 BT_DBG("%s start", hdev->name);
199
200 hci_req_init(&req, hdev);
201
202 hdev->req_status = HCI_REQ_PEND;
203
204 err = func(&req, opt);
205 if (err) {
206 if (hci_status)
207 *hci_status = HCI_ERROR_UNSPECIFIED;
208 return err;
209 }
210
211 add_wait_queue(&hdev->req_wait_q, &wait);
212 set_current_state(TASK_INTERRUPTIBLE);
213
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
215 if (err < 0) {
216 hdev->req_status = 0;
217
218 remove_wait_queue(&hdev->req_wait_q, &wait);
219 set_current_state(TASK_RUNNING);
220
221 /* ENODATA means the HCI request command queue is empty.
222 * This can happen when a request with conditionals doesn't
223 * trigger any commands to be sent. This is normal behavior
224 * and should not trigger an error return.
225 */
226 if (err == -ENODATA) {
227 if (hci_status)
228 *hci_status = 0;
229 return 0;
230 }
231
232 if (hci_status)
233 *hci_status = HCI_ERROR_UNSPECIFIED;
234
235 return err;
236 }
237
238 schedule_timeout(timeout);
239
240 remove_wait_queue(&hdev->req_wait_q, &wait);
241
242 if (signal_pending(current))
243 return -EINTR;
244
245 switch (hdev->req_status) {
246 case HCI_REQ_DONE:
247 err = -bt_to_errno(hdev->req_result);
248 if (hci_status)
249 *hci_status = hdev->req_result;
250 break;
251
252 case HCI_REQ_CANCELED:
253 err = -hdev->req_result;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257
258 default:
259 err = -ETIMEDOUT;
260 if (hci_status)
261 *hci_status = HCI_ERROR_UNSPECIFIED;
262 break;
263 }
264
265 hdev->req_status = hdev->req_result = 0;
266
267 BT_DBG("%s end: err %d", hdev->name, err);
268
269 return err;
270}
271
272int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
273 unsigned long opt),
274 unsigned long opt, u32 timeout, u8 *hci_status)
275{
276 int ret;
277
278 if (!test_bit(HCI_UP, &hdev->flags))
279 return -ENETDOWN;
280
281 /* Serialize all requests */
282 hci_req_sync_lock(hdev);
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 hci_req_sync_unlock(hdev);
285
286 return ret;
287}
288
289struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291{
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 memcpy(skb_put(skb, plen), param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
311
312 return skb;
313}
314
315/* Queue a command to an asynchronous HCI request */
316void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
318{
319 struct hci_dev *hdev = req->hdev;
320 struct sk_buff *skb;
321
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
326 */
327 if (req->err)
328 return;
329
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 if (!skb) {
332 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
333 hdev->name, opcode);
334 req->err = -ENOMEM;
335 return;
336 }
337
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340
341 bt_cb(skb)->hci.req_event = event;
342
343 skb_queue_tail(&req->cmd_q, skb);
344}
345
346void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348{
349 hci_req_add_ev(req, opcode, plen, param, 0);
350}
351
352void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353{
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
356 u8 type;
357
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 return;
360
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 return;
363
364 if (enable) {
365 type = PAGE_SCAN_TYPE_INTERLACED;
366
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
369 } else {
370 type = PAGE_SCAN_TYPE_STANDARD; /* default */
371
372 /* default 1.28 sec page scan */
373 acp.interval = cpu_to_le16(0x0800);
374 }
375
376 acp.window = cpu_to_le16(0x0012);
377
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 sizeof(acp), &acp);
382
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385}
386
387/* This function controls the background scanning based on hdev->pend_le_conns
388 * list. If there are pending LE connection we start the background scanning,
389 * otherwise we stop it.
390 *
391 * This function requires the caller holds hdev->lock.
392 */
393static void __hci_update_background_scan(struct hci_request *req)
394{
395 struct hci_dev *hdev = req->hdev;
396
397 if (!test_bit(HCI_UP, &hdev->flags) ||
398 test_bit(HCI_INIT, &hdev->flags) ||
399 hci_dev_test_flag(hdev, HCI_SETUP) ||
400 hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 hci_dev_test_flag(hdev, HCI_UNREGISTER))
403 return;
404
405 /* No point in doing scanning if LE support hasn't been enabled */
406 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
407 return;
408
409 /* If discovery is active don't interfere with it */
410 if (hdev->discovery.state != DISCOVERY_STOPPED)
411 return;
412
413 /* Reset RSSI and UUID filters when starting background scanning
414 * since these filters are meant for service discovery only.
415 *
416 * The Start Discovery and Start Service Discovery operations
417 * ensure to set proper values for RSSI threshold and UUID
418 * filter list. So it is safe to just reset them here.
419 */
420 hci_discovery_filter_clear(hdev);
421
422 if (list_empty(&hdev->pend_le_conns) &&
423 list_empty(&hdev->pend_le_reports)) {
424 /* If there is no pending LE connections or devices
425 * to be scanned for, we should stop the background
426 * scanning.
427 */
428
429 /* If controller is not scanning we are done. */
430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
431 return;
432
433 hci_req_add_le_scan_disable(req);
434
435 BT_DBG("%s stopping background scanning", hdev->name);
436 } else {
437 /* If there is at least one pending LE connection, we should
438 * keep the background scan running.
439 */
440
441 /* If controller is connecting, we should not start scanning
442 * since some controllers are not able to scan and connect at
443 * the same time.
444 */
445 if (hci_lookup_le_connect(hdev))
446 return;
447
448 /* If controller is currently scanning, we stop it to ensure we
449 * don't miss any advertising (due to duplicates filter).
450 */
451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 hci_req_add_le_scan_disable(req);
453
454 hci_req_add_le_passive_scan(req);
455
456 BT_DBG("%s starting background scanning", hdev->name);
457 }
458}
459
460void __hci_req_update_name(struct hci_request *req)
461{
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
464
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
466
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468}
469
470#define PNP_INFO_SVCLASS_ID 0x1200
471
472static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
473{
474 u8 *ptr = data, *uuids_start = NULL;
475 struct bt_uuid *uuid;
476
477 if (len < 4)
478 return ptr;
479
480 list_for_each_entry(uuid, &hdev->uuids, list) {
481 u16 uuid16;
482
483 if (uuid->size != 16)
484 continue;
485
486 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
487 if (uuid16 < 0x1100)
488 continue;
489
490 if (uuid16 == PNP_INFO_SVCLASS_ID)
491 continue;
492
493 if (!uuids_start) {
494 uuids_start = ptr;
495 uuids_start[0] = 1;
496 uuids_start[1] = EIR_UUID16_ALL;
497 ptr += 2;
498 }
499
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u16) > len) {
502 uuids_start[1] = EIR_UUID16_SOME;
503 break;
504 }
505
506 *ptr++ = (uuid16 & 0x00ff);
507 *ptr++ = (uuid16 & 0xff00) >> 8;
508 uuids_start[0] += sizeof(uuid16);
509 }
510
511 return ptr;
512}
513
514static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515{
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
518
519 if (len < 6)
520 return ptr;
521
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 32)
524 continue;
525
526 if (!uuids_start) {
527 uuids_start = ptr;
528 uuids_start[0] = 1;
529 uuids_start[1] = EIR_UUID32_ALL;
530 ptr += 2;
531 }
532
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + sizeof(u32) > len) {
535 uuids_start[1] = EIR_UUID32_SOME;
536 break;
537 }
538
539 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
540 ptr += sizeof(u32);
541 uuids_start[0] += sizeof(u32);
542 }
543
544 return ptr;
545}
546
547static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
548{
549 u8 *ptr = data, *uuids_start = NULL;
550 struct bt_uuid *uuid;
551
552 if (len < 18)
553 return ptr;
554
555 list_for_each_entry(uuid, &hdev->uuids, list) {
556 if (uuid->size != 128)
557 continue;
558
559 if (!uuids_start) {
560 uuids_start = ptr;
561 uuids_start[0] = 1;
562 uuids_start[1] = EIR_UUID128_ALL;
563 ptr += 2;
564 }
565
566 /* Stop if not enough space to put next UUID */
567 if ((ptr - data) + 16 > len) {
568 uuids_start[1] = EIR_UUID128_SOME;
569 break;
570 }
571
572 memcpy(ptr, uuid->uuid, 16);
573 ptr += 16;
574 uuids_start[0] += 16;
575 }
576
577 return ptr;
578}
579
580static void create_eir(struct hci_dev *hdev, u8 *data)
581{
582 u8 *ptr = data;
583 size_t name_len;
584
585 name_len = strlen(hdev->dev_name);
586
587 if (name_len > 0) {
588 /* EIR Data type */
589 if (name_len > 48) {
590 name_len = 48;
591 ptr[1] = EIR_NAME_SHORT;
592 } else
593 ptr[1] = EIR_NAME_COMPLETE;
594
595 /* EIR Data length */
596 ptr[0] = name_len + 1;
597
598 memcpy(ptr + 2, hdev->dev_name, name_len);
599
600 ptr += (name_len + 2);
601 }
602
603 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
604 ptr[0] = 2;
605 ptr[1] = EIR_TX_POWER;
606 ptr[2] = (u8) hdev->inq_tx_power;
607
608 ptr += 3;
609 }
610
611 if (hdev->devid_source > 0) {
612 ptr[0] = 9;
613 ptr[1] = EIR_DEVICE_ID;
614
615 put_unaligned_le16(hdev->devid_source, ptr + 2);
616 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 put_unaligned_le16(hdev->devid_product, ptr + 6);
618 put_unaligned_le16(hdev->devid_version, ptr + 8);
619
620 ptr += 10;
621 }
622
623 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626}
627
628void __hci_req_update_eir(struct hci_request *req)
629{
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
632
633 if (!hdev_is_powered(hdev))
634 return;
635
636 if (!lmp_ext_inq_capable(hdev))
637 return;
638
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640 return;
641
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643 return;
644
645 memset(&cp, 0, sizeof(cp));
646
647 create_eir(hdev, cp.data);
648
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650 return;
651
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
653
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655}
656
657void hci_req_add_le_scan_disable(struct hci_request *req)
658{
659 struct hci_cp_le_set_scan_enable cp;
660
661 memset(&cp, 0, sizeof(cp));
662 cp.enable = LE_SCAN_DISABLE;
663 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
664}
665
666static void add_to_white_list(struct hci_request *req,
667 struct hci_conn_params *params)
668{
669 struct hci_cp_le_add_to_white_list cp;
670
671 cp.bdaddr_type = params->addr_type;
672 bacpy(&cp.bdaddr, ¶ms->addr);
673
674 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
675}
676
677static u8 update_white_list(struct hci_request *req)
678{
679 struct hci_dev *hdev = req->hdev;
680 struct hci_conn_params *params;
681 struct bdaddr_list *b;
682 uint8_t white_list_entries = 0;
683
684 /* Go through the current white list programmed into the
685 * controller one by one and check if that address is still
686 * in the list of pending connections or list of devices to
687 * report. If not present in either list, then queue the
688 * command to remove it from the controller.
689 */
690 list_for_each_entry(b, &hdev->le_white_list, list) {
691 /* If the device is neither in pend_le_conns nor
692 * pend_le_reports then remove it from the whitelist.
693 */
694 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
695 &b->bdaddr, b->bdaddr_type) &&
696 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
697 &b->bdaddr, b->bdaddr_type)) {
698 struct hci_cp_le_del_from_white_list cp;
699
700 cp.bdaddr_type = b->bdaddr_type;
701 bacpy(&cp.bdaddr, &b->bdaddr);
702
703 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
704 sizeof(cp), &cp);
705 continue;
706 }
707
708 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
709 /* White list can not be used with RPAs */
710 return 0x00;
711 }
712
713 white_list_entries++;
714 }
715
716 /* Since all no longer valid white list entries have been
717 * removed, walk through the list of pending connections
718 * and ensure that any new device gets programmed into
719 * the controller.
720 *
721 * If the list of the devices is larger than the list of
722 * available white list entries in the controller, then
723 * just abort and return filer policy value to not use the
724 * white list.
725 */
726 list_for_each_entry(params, &hdev->pend_le_conns, action) {
727 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
728 ¶ms->addr, params->addr_type))
729 continue;
730
731 if (white_list_entries >= hdev->le_white_list_size) {
732 /* Select filter policy to accept all advertising */
733 return 0x00;
734 }
735
736 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
737 params->addr_type)) {
738 /* White list can not be used with RPAs */
739 return 0x00;
740 }
741
742 white_list_entries++;
743 add_to_white_list(req, params);
744 }
745
746 /* After adding all new pending connections, walk through
747 * the list of pending reports and also add these to the
748 * white list if there is still space.
749 */
750 list_for_each_entry(params, &hdev->pend_le_reports, action) {
751 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
752 ¶ms->addr, params->addr_type))
753 continue;
754
755 if (white_list_entries >= hdev->le_white_list_size) {
756 /* Select filter policy to accept all advertising */
757 return 0x00;
758 }
759
760 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
761 params->addr_type)) {
762 /* White list can not be used with RPAs */
763 return 0x00;
764 }
765
766 white_list_entries++;
767 add_to_white_list(req, params);
768 }
769
770 /* Select filter policy to use white list */
771 return 0x01;
772}
773
774static bool scan_use_rpa(struct hci_dev *hdev)
775{
776 return hci_dev_test_flag(hdev, HCI_PRIVACY);
777}
778
779void hci_req_add_le_passive_scan(struct hci_request *req)
780{
781 struct hci_cp_le_set_scan_param param_cp;
782 struct hci_cp_le_set_scan_enable enable_cp;
783 struct hci_dev *hdev = req->hdev;
784 u8 own_addr_type;
785 u8 filter_policy;
786
787 /* Set require_privacy to false since no SCAN_REQ are send
788 * during passive scanning. Not using an non-resolvable address
789 * here is important so that peer devices using direct
790 * advertising with our address will be correctly reported
791 * by the controller.
792 */
793 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
794 &own_addr_type))
795 return;
796
797 /* Adding or removing entries from the white list must
798 * happen before enabling scanning. The controller does
799 * not allow white list modification while scanning.
800 */
801 filter_policy = update_white_list(req);
802
803 /* When the controller is using random resolvable addresses and
804 * with that having LE privacy enabled, then controllers with
805 * Extended Scanner Filter Policies support can now enable support
806 * for handling directed advertising.
807 *
808 * So instead of using filter polices 0x00 (no whitelist)
809 * and 0x01 (whitelist enabled) use the new filter policies
810 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
811 */
812 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
813 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
814 filter_policy |= 0x02;
815
816 memset(¶m_cp, 0, sizeof(param_cp));
817 param_cp.type = LE_SCAN_PASSIVE;
818 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
819 param_cp.window = cpu_to_le16(hdev->le_scan_window);
820 param_cp.own_address_type = own_addr_type;
821 param_cp.filter_policy = filter_policy;
822 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
823 ¶m_cp);
824
825 memset(&enable_cp, 0, sizeof(enable_cp));
826 enable_cp.enable = LE_SCAN_ENABLE;
827 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
828 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
829 &enable_cp);
830}
831
832static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
833{
834 u8 instance = hdev->cur_adv_instance;
835 struct adv_info *adv_instance;
836
837 /* Ignore instance 0 */
838 if (instance == 0x00)
839 return 0;
840
841 adv_instance = hci_find_adv_instance(hdev, instance);
842 if (!adv_instance)
843 return 0;
844
845 /* TODO: Take into account the "appearance" and "local-name" flags here.
846 * These are currently being ignored as they are not supported.
847 */
848 return adv_instance->scan_rsp_len;
849}
850
851void __hci_req_disable_advertising(struct hci_request *req)
852{
853 u8 enable = 0x00;
854
855 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
856}
857
858static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
859{
860 u32 flags;
861 struct adv_info *adv_instance;
862
863 if (instance == 0x00) {
864 /* Instance 0 always manages the "Tx Power" and "Flags"
865 * fields
866 */
867 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
868
869 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
870 * corresponds to the "connectable" instance flag.
871 */
872 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
873 flags |= MGMT_ADV_FLAG_CONNECTABLE;
874
875 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
876 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
877 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
878 flags |= MGMT_ADV_FLAG_DISCOV;
879
880 return flags;
881 }
882
883 adv_instance = hci_find_adv_instance(hdev, instance);
884
885 /* Return 0 when we got an invalid instance identifier. */
886 if (!adv_instance)
887 return 0;
888
889 return adv_instance->flags;
890}
891
892static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
893{
894 /* If privacy is not enabled don't use RPA */
895 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
896 return false;
897
898 /* If basic privacy mode is enabled use RPA */
899 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
900 return true;
901
902 /* If limited privacy mode is enabled don't use RPA if we're
903 * both discoverable and bondable.
904 */
905 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
906 hci_dev_test_flag(hdev, HCI_BONDABLE))
907 return false;
908
909 /* We're neither bondable nor discoverable in the limited
910 * privacy mode, therefore use RPA.
911 */
912 return true;
913}
914
915void __hci_req_enable_advertising(struct hci_request *req)
916{
917 struct hci_dev *hdev = req->hdev;
918 struct hci_cp_le_set_adv_param cp;
919 u8 own_addr_type, enable = 0x01;
920 bool connectable;
921 u32 flags;
922
923 if (hci_conn_num(hdev, LE_LINK) > 0)
924 return;
925
926 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
927 __hci_req_disable_advertising(req);
928
929 /* Clear the HCI_LE_ADV bit temporarily so that the
930 * hci_update_random_address knows that it's safe to go ahead
931 * and write a new random address. The flag will be set back on
932 * as soon as the SET_ADV_ENABLE HCI command completes.
933 */
934 hci_dev_clear_flag(hdev, HCI_LE_ADV);
935
936 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
937
938 /* If the "connectable" instance flag was not set, then choose between
939 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
940 */
941 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
942 mgmt_get_connectable(hdev);
943
944 /* Set require_privacy to true only when non-connectable
945 * advertising is used. In that case it is fine to use a
946 * non-resolvable private address.
947 */
948 if (hci_update_random_address(req, !connectable,
949 adv_use_rpa(hdev, flags),
950 &own_addr_type) < 0)
951 return;
952
953 memset(&cp, 0, sizeof(cp));
954 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
955 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
956
957 if (connectable)
958 cp.type = LE_ADV_IND;
959 else if (get_cur_adv_instance_scan_rsp_len(hdev))
960 cp.type = LE_ADV_SCAN_IND;
961 else
962 cp.type = LE_ADV_NONCONN_IND;
963
964 cp.own_address_type = own_addr_type;
965 cp.channel_map = hdev->le_adv_channel_map;
966
967 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
968
969 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
970}
971
972static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
973{
974 u8 ad_len = 0;
975 size_t name_len;
976
977 name_len = strlen(hdev->dev_name);
978 if (name_len > 0) {
979 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
980
981 if (name_len > max_len) {
982 name_len = max_len;
983 ptr[1] = EIR_NAME_SHORT;
984 } else
985 ptr[1] = EIR_NAME_COMPLETE;
986
987 ptr[0] = name_len + 1;
988
989 memcpy(ptr + 2, hdev->dev_name, name_len);
990
991 ad_len += (name_len + 2);
992 ptr += (name_len + 2);
993 }
994
995 return ad_len;
996}
997
998static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
999 u8 *ptr)
1000{
1001 struct adv_info *adv_instance;
1002
1003 adv_instance = hci_find_adv_instance(hdev, instance);
1004 if (!adv_instance)
1005 return 0;
1006
1007 /* TODO: Set the appropriate entries based on advertising instance flags
1008 * here once flags other than 0 are supported.
1009 */
1010 memcpy(ptr, adv_instance->scan_rsp_data,
1011 adv_instance->scan_rsp_len);
1012
1013 return adv_instance->scan_rsp_len;
1014}
1015
1016void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1017{
1018 struct hci_dev *hdev = req->hdev;
1019 struct hci_cp_le_set_scan_rsp_data cp;
1020 u8 len;
1021
1022 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1023 return;
1024
1025 memset(&cp, 0, sizeof(cp));
1026
1027 if (instance)
1028 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1029 else
1030 len = create_default_scan_rsp_data(hdev, cp.data);
1031
1032 if (hdev->scan_rsp_data_len == len &&
1033 !memcmp(cp.data, hdev->scan_rsp_data, len))
1034 return;
1035
1036 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1037 hdev->scan_rsp_data_len = len;
1038
1039 cp.length = len;
1040
1041 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1042}
1043
1044static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1045{
1046 struct adv_info *adv_instance = NULL;
1047 u8 ad_len = 0, flags = 0;
1048 u32 instance_flags;
1049
1050 /* Return 0 when the current instance identifier is invalid. */
1051 if (instance) {
1052 adv_instance = hci_find_adv_instance(hdev, instance);
1053 if (!adv_instance)
1054 return 0;
1055 }
1056
1057 instance_flags = get_adv_instance_flags(hdev, instance);
1058
1059 /* The Add Advertising command allows userspace to set both the general
1060 * and limited discoverable flags.
1061 */
1062 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1063 flags |= LE_AD_GENERAL;
1064
1065 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1066 flags |= LE_AD_LIMITED;
1067
1068 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1069 /* If a discovery flag wasn't provided, simply use the global
1070 * settings.
1071 */
1072 if (!flags)
1073 flags |= mgmt_get_adv_discov_flags(hdev);
1074
1075 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1076 flags |= LE_AD_NO_BREDR;
1077
1078 /* If flags would still be empty, then there is no need to
1079 * include the "Flags" AD field".
1080 */
1081 if (flags) {
1082 ptr[0] = 0x02;
1083 ptr[1] = EIR_FLAGS;
1084 ptr[2] = flags;
1085
1086 ad_len += 3;
1087 ptr += 3;
1088 }
1089 }
1090
1091 if (adv_instance) {
1092 memcpy(ptr, adv_instance->adv_data,
1093 adv_instance->adv_data_len);
1094 ad_len += adv_instance->adv_data_len;
1095 ptr += adv_instance->adv_data_len;
1096 }
1097
1098 /* Provide Tx Power only if we can provide a valid value for it */
1099 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1100 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1101 ptr[0] = 0x02;
1102 ptr[1] = EIR_TX_POWER;
1103 ptr[2] = (u8)hdev->adv_tx_power;
1104
1105 ad_len += 3;
1106 ptr += 3;
1107 }
1108
1109 return ad_len;
1110}
1111
1112void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1113{
1114 struct hci_dev *hdev = req->hdev;
1115 struct hci_cp_le_set_adv_data cp;
1116 u8 len;
1117
1118 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1119 return;
1120
1121 memset(&cp, 0, sizeof(cp));
1122
1123 len = create_instance_adv_data(hdev, instance, cp.data);
1124
1125 /* There's nothing to do if the data hasn't changed */
1126 if (hdev->adv_data_len == len &&
1127 memcmp(cp.data, hdev->adv_data, len) == 0)
1128 return;
1129
1130 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1131 hdev->adv_data_len = len;
1132
1133 cp.length = len;
1134
1135 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1136}
1137
1138int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1139{
1140 struct hci_request req;
1141
1142 hci_req_init(&req, hdev);
1143 __hci_req_update_adv_data(&req, instance);
1144
1145 return hci_req_run(&req, NULL);
1146}
1147
1148static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1149{
1150 BT_DBG("%s status %u", hdev->name, status);
1151}
1152
1153void hci_req_reenable_advertising(struct hci_dev *hdev)
1154{
1155 struct hci_request req;
1156
1157 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1158 list_empty(&hdev->adv_instances))
1159 return;
1160
1161 hci_req_init(&req, hdev);
1162
1163 if (hdev->cur_adv_instance) {
1164 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1165 true);
1166 } else {
1167 __hci_req_update_adv_data(&req, 0x00);
1168 __hci_req_update_scan_rsp_data(&req, 0x00);
1169 __hci_req_enable_advertising(&req);
1170 }
1171
1172 hci_req_run(&req, adv_enable_complete);
1173}
1174
1175static void adv_timeout_expire(struct work_struct *work)
1176{
1177 struct hci_dev *hdev = container_of(work, struct hci_dev,
1178 adv_instance_expire.work);
1179
1180 struct hci_request req;
1181 u8 instance;
1182
1183 BT_DBG("%s", hdev->name);
1184
1185 hci_dev_lock(hdev);
1186
1187 hdev->adv_instance_timeout = 0;
1188
1189 instance = hdev->cur_adv_instance;
1190 if (instance == 0x00)
1191 goto unlock;
1192
1193 hci_req_init(&req, hdev);
1194
1195 hci_req_clear_adv_instance(hdev, &req, instance, false);
1196
1197 if (list_empty(&hdev->adv_instances))
1198 __hci_req_disable_advertising(&req);
1199
1200 hci_req_run(&req, NULL);
1201
1202unlock:
1203 hci_dev_unlock(hdev);
1204}
1205
1206int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1207 bool force)
1208{
1209 struct hci_dev *hdev = req->hdev;
1210 struct adv_info *adv_instance = NULL;
1211 u16 timeout;
1212
1213 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1214 list_empty(&hdev->adv_instances))
1215 return -EPERM;
1216
1217 if (hdev->adv_instance_timeout)
1218 return -EBUSY;
1219
1220 adv_instance = hci_find_adv_instance(hdev, instance);
1221 if (!adv_instance)
1222 return -ENOENT;
1223
1224 /* A zero timeout means unlimited advertising. As long as there is
1225 * only one instance, duration should be ignored. We still set a timeout
1226	 * in case further instances are added later on.
1227 *
1228 * If the remaining lifetime of the instance is more than the duration
1229 * then the timeout corresponds to the duration, otherwise it will be
1230 * reduced to the remaining instance lifetime.
1231 */
1232 if (adv_instance->timeout == 0 ||
1233 adv_instance->duration <= adv_instance->remaining_time)
1234 timeout = adv_instance->duration;
1235 else
1236 timeout = adv_instance->remaining_time;
1237
1238	/* The remaining time is reduced unless the instance is being
1239	 * advertised without a time limit.
1240 */
1241 if (adv_instance->timeout)
1242 adv_instance->remaining_time =
1243 adv_instance->remaining_time - timeout;
1244
1245 hdev->adv_instance_timeout = timeout;
1246 queue_delayed_work(hdev->req_workqueue,
1247 &hdev->adv_instance_expire,
1248 msecs_to_jiffies(timeout * 1000));
1249
1250	/* If we're just re-scheduling the same instance then do not
1251 * execute any HCI commands. This happens when a single instance is
1252 * being advertised.
1253 */
1254 if (!force && hdev->cur_adv_instance == instance &&
1255 hci_dev_test_flag(hdev, HCI_LE_ADV))
1256 return 0;
1257
1258 hdev->cur_adv_instance = instance;
1259 __hci_req_update_adv_data(req, instance);
1260 __hci_req_update_scan_rsp_data(req, instance);
1261 __hci_req_enable_advertising(req);
1262
1263 return 0;
1264}
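/* Worked example (illustrative values): an instance created with
 * timeout 25 and duration 10 starts with remaining_time 25. The first
 * two passes through the code above schedule expiry after 10 seconds
 * each, reducing remaining_time to 15 and then 5; on the third pass the
 * remaining 5 seconds are less than the duration, so the shorter value
 * is used and the instance never outlives its 25 second limit.
 */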
1265
1266static void cancel_adv_timeout(struct hci_dev *hdev)
1267{
1268 if (hdev->adv_instance_timeout) {
1269 hdev->adv_instance_timeout = 0;
1270 cancel_delayed_work(&hdev->adv_instance_expire);
1271 }
1272}
1273
1274/* For a single instance:
1275 * - force == true: The instance will be removed even when its remaining
1276 * lifetime is not zero.
1277 * - force == false: The instance will be deactivated but kept stored unless
1278 * the remaining lifetime is zero.
1279 *
1280 * For instance == 0x00:
1281 * - force == true: All instances will be removed regardless of their timeout
1282 * setting.
1283 * - force == false: Only instances that have a timeout will be removed.
1284 */
1285void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
1286 u8 instance, bool force)
1287{
1288 struct adv_info *adv_instance, *n, *next_instance = NULL;
1289 int err;
1290 u8 rem_inst;
1291
1292 /* Cancel any timeout concerning the removed instance(s). */
1293 if (!instance || hdev->cur_adv_instance == instance)
1294 cancel_adv_timeout(hdev);
1295
1296 /* Get the next instance to advertise BEFORE we remove
1297 * the current one. This can be the same instance again
1298 * if there is only one instance.
1299 */
1300 if (instance && hdev->cur_adv_instance == instance)
1301 next_instance = hci_get_next_instance(hdev, instance);
1302
1303 if (instance == 0x00) {
1304 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1305 list) {
1306 if (!(force || adv_instance->timeout))
1307 continue;
1308
1309 rem_inst = adv_instance->instance;
1310 err = hci_remove_adv_instance(hdev, rem_inst);
1311 if (!err)
1312 mgmt_advertising_removed(NULL, hdev, rem_inst);
1313 }
1314 } else {
1315 adv_instance = hci_find_adv_instance(hdev, instance);
1316
1317 if (force || (adv_instance && adv_instance->timeout &&
1318 !adv_instance->remaining_time)) {
1319 /* Don't advertise a removed instance. */
1320 if (next_instance &&
1321 next_instance->instance == instance)
1322 next_instance = NULL;
1323
1324 err = hci_remove_adv_instance(hdev, instance);
1325 if (!err)
1326 mgmt_advertising_removed(NULL, hdev, instance);
1327 }
1328 }
1329
1330 if (!req || !hdev_is_powered(hdev) ||
1331 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1332 return;
1333
1334 if (next_instance)
1335 __hci_req_schedule_adv_instance(req, next_instance->instance,
1336 false);
1337}
1338
1339static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1340{
1341 struct hci_dev *hdev = req->hdev;
1342
1343 /* If we're advertising or initiating an LE connection we can't
1344 * go ahead and change the random address at this time. This is
1345 * because the eventual initiator address used for the
1346 * subsequently created connection will be undefined (some
1347 * controllers use the new address and others the one we had
1348 * when the operation started).
1349 *
1350	 * In this kind of scenario, skip the update and let the random
1351 * address be updated at the next cycle.
1352 */
1353 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1354 hci_lookup_le_connect(hdev)) {
1355 BT_DBG("Deferring random address update");
1356 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1357 return;
1358 }
1359
1360 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1361}
1362
1363int hci_update_random_address(struct hci_request *req, bool require_privacy,
1364 bool use_rpa, u8 *own_addr_type)
1365{
1366 struct hci_dev *hdev = req->hdev;
1367 int err;
1368
1369	/* If privacy is enabled, use a resolvable private address. If
1370	 * the current RPA has expired or something other than the
1371	 * current RPA is in use, generate a new one.
1372 */
1373 if (use_rpa) {
1374 int to;
1375
1376 *own_addr_type = ADDR_LE_DEV_RANDOM;
1377
1378 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1379 !bacmp(&hdev->random_addr, &hdev->rpa))
1380 return 0;
1381
1382 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1383 if (err < 0) {
1384 BT_ERR("%s failed to generate new RPA", hdev->name);
1385 return err;
1386 }
1387
1388 set_random_addr(req, &hdev->rpa);
1389
1390 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1391 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1392
1393 return 0;
1394 }
1395
1396	/* In case privacy is required without a resolvable private address,
1397	 * use a non-resolvable private address. This is useful for active
1398 * scanning and non-connectable advertising.
1399 */
1400 if (require_privacy) {
1401 bdaddr_t nrpa;
1402
1403 while (true) {
1404 /* The non-resolvable private address is generated
1405			 * from six random bytes with the two most significant
1406 * bits cleared.
1407 */
1408 get_random_bytes(&nrpa, 6);
1409 nrpa.b[5] &= 0x3f;
1410
1411 /* The non-resolvable private address shall not be
1412 * equal to the public address.
1413 */
1414 if (bacmp(&hdev->bdaddr, &nrpa))
1415 break;
1416 }
1417
1418 *own_addr_type = ADDR_LE_DEV_RANDOM;
1419 set_random_addr(req, &nrpa);
1420 return 0;
1421 }
1422
1423	/* If a forced static address is in use or there is no public
1424	 * address, use the static address as the random address (but
1425	 * skip the HCI command if the current random address is already
1426	 * the static one).
1427 *
1428 * In case BR/EDR has been disabled on a dual-mode controller
1429 * and a static address has been configured, then use that
1430 * address instead of the public BR/EDR address.
1431 */
1432 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1433 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1434 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1435 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1436 *own_addr_type = ADDR_LE_DEV_RANDOM;
1437 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1438 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1439 &hdev->static_addr);
1440 return 0;
1441 }
1442
1443 /* Neither privacy nor static address is being used so use a
1444 * public address.
1445 */
1446 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1447
1448 return 0;
1449}
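/* Summary of the address selection above (derived from the code, for
 * quick reference):
 *
 *	use_rpa			-> random, resolvable private address
 *	require_privacy		-> random, fresh non-resolvable address
 *	forced/implied static	-> random, configured static address
 *	otherwise		-> public address
 */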
1450
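/* Helper for __hci_req_update_scan() below: returns true when at least
 * one whitelist entry is not backed by a fully established ACL
 * connection, in which case page scan must stay enabled so that the
 * device can reconnect.
 */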
1451static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1452{
1453 struct bdaddr_list *b;
1454
1455 list_for_each_entry(b, &hdev->whitelist, list) {
1456 struct hci_conn *conn;
1457
1458 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1459 if (!conn)
1460 return true;
1461
1462 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1463 return true;
1464 }
1465
1466 return false;
1467}
1468
1469void __hci_req_update_scan(struct hci_request *req)
1470{
1471 struct hci_dev *hdev = req->hdev;
1472 u8 scan;
1473
1474 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1475 return;
1476
1477 if (!hdev_is_powered(hdev))
1478 return;
1479
1480 if (mgmt_powering_down(hdev))
1481 return;
1482
1483 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1484 disconnected_whitelist_entries(hdev))
1485 scan = SCAN_PAGE;
1486 else
1487 scan = SCAN_DISABLED;
1488
1489 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1490 scan |= SCAN_INQUIRY;
1491
1492 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1493 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1494 return;
1495
1496 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1497}
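/* For reference, the scan values above combine per the standard HCI
 * Write Scan Enable encoding:
 *
 *	SCAN_DISABLED			0x00
 *	SCAN_INQUIRY			0x01  (discoverable only)
 *	SCAN_PAGE			0x02  (connectable only)
 *	SCAN_PAGE | SCAN_INQUIRY	0x03  (connectable and discoverable)
 */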
1498
1499static int update_scan(struct hci_request *req, unsigned long opt)
1500{
1501 hci_dev_lock(req->hdev);
1502 __hci_req_update_scan(req);
1503 hci_dev_unlock(req->hdev);
1504 return 0;
1505}
1506
1507static void scan_update_work(struct work_struct *work)
1508{
1509 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1510
1511 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1512}
1513
1514static int connectable_update(struct hci_request *req, unsigned long opt)
1515{
1516 struct hci_dev *hdev = req->hdev;
1517
1518 hci_dev_lock(hdev);
1519
1520 __hci_req_update_scan(req);
1521
1522 /* If BR/EDR is not enabled and we disable advertising as a
1523 * by-product of disabling connectable, we need to update the
1524 * advertising flags.
1525 */
1526 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1527 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1528
1529 /* Update the advertising parameters if necessary */
1530 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1531 !list_empty(&hdev->adv_instances))
1532 __hci_req_enable_advertising(req);
1533
1534 __hci_update_background_scan(req);
1535
1536 hci_dev_unlock(hdev);
1537
1538 return 0;
1539}
1540
1541static void connectable_update_work(struct work_struct *work)
1542{
1543 struct hci_dev *hdev = container_of(work, struct hci_dev,
1544 connectable_update);
1545 u8 status;
1546
1547 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1548 mgmt_set_connectable_complete(hdev, status);
1549}
1550
1551static u8 get_service_classes(struct hci_dev *hdev)
1552{
1553 struct bt_uuid *uuid;
1554 u8 val = 0;
1555
1556 list_for_each_entry(uuid, &hdev->uuids, list)
1557 val |= uuid->svc_hint;
1558
1559 return val;
1560}
1561
1562void __hci_req_update_class(struct hci_request *req)
1563{
1564 struct hci_dev *hdev = req->hdev;
1565 u8 cod[3];
1566
1567 BT_DBG("%s", hdev->name);
1568
1569 if (!hdev_is_powered(hdev))
1570 return;
1571
1572 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1573 return;
1574
1575 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1576 return;
1577
1578 cod[0] = hdev->minor_class;
1579 cod[1] = hdev->major_class;
1580 cod[2] = get_service_classes(hdev);
1581
1582 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1583 cod[1] |= 0x20;
1584
1585 if (memcmp(cod, hdev->dev_class, 3) == 0)
1586 return;
1587
1588 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1589}
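/* Illustrative sketch (example values): with major_class 0x01 (Computer)
 * and minor_class 0x0c, the bytes above encode the Class of Device
 * 0x00010c (cod[0] is the least significant byte). Entering limited
 * discoverable mode sets 0x20 in cod[1], i.e. CoD bit 13, the Limited
 * Discoverable Mode service class bit.
 */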
1590
1591static void write_iac(struct hci_request *req)
1592{
1593 struct hci_dev *hdev = req->hdev;
1594 struct hci_cp_write_current_iac_lap cp;
1595
1596 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1597 return;
1598
1599 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1600 /* Limited discoverable mode */
1601 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1602 cp.iac_lap[0] = 0x00; /* LIAC */
1603 cp.iac_lap[1] = 0x8b;
1604 cp.iac_lap[2] = 0x9e;
1605 cp.iac_lap[3] = 0x33; /* GIAC */
1606 cp.iac_lap[4] = 0x8b;
1607 cp.iac_lap[5] = 0x9e;
1608 } else {
1609 /* General discoverable mode */
1610 cp.num_iac = 1;
1611 cp.iac_lap[0] = 0x33; /* GIAC */
1612 cp.iac_lap[1] = 0x8b;
1613 cp.iac_lap[2] = 0x9e;
1614 }
1615
1616 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1617 (cp.num_iac * 3) + 1, &cp);
1618}
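/* The byte triplets above are the LIAC (0x9e8b00) and GIAC (0x9e8b33)
 * inquiry access codes in little-endian order: limited discoverable
 * mode listens for both LAPs, while general discoverable mode listens
 * for the GIAC alone.
 */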
1619
1620static int discoverable_update(struct hci_request *req, unsigned long opt)
1621{
1622 struct hci_dev *hdev = req->hdev;
1623
1624 hci_dev_lock(hdev);
1625
1626 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1627 write_iac(req);
1628 __hci_req_update_scan(req);
1629 __hci_req_update_class(req);
1630 }
1631
1632 /* Advertising instances don't use the global discoverable setting, so
1633 * only update AD if advertising was enabled using Set Advertising.
1634 */
1635 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1636 __hci_req_update_adv_data(req, 0x00);
1637
1638 /* Discoverable mode affects the local advertising
1639 * address in limited privacy mode.
1640 */
1641 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1642 __hci_req_enable_advertising(req);
1643 }
1644
1645 hci_dev_unlock(hdev);
1646
1647 return 0;
1648}
1649
1650static void discoverable_update_work(struct work_struct *work)
1651{
1652 struct hci_dev *hdev = container_of(work, struct hci_dev,
1653 discoverable_update);
1654 u8 status;
1655
1656 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1657 mgmt_set_discoverable_complete(hdev, status);
1658}
1659
1660void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1661 u8 reason)
1662{
1663 switch (conn->state) {
1664 case BT_CONNECTED:
1665 case BT_CONFIG:
1666 if (conn->type == AMP_LINK) {
1667 struct hci_cp_disconn_phy_link cp;
1668
1669 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1670 cp.reason = reason;
1671 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1672 &cp);
1673 } else {
1674 struct hci_cp_disconnect dc;
1675
1676 dc.handle = cpu_to_le16(conn->handle);
1677 dc.reason = reason;
1678 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1679 }
1680
1681 conn->state = BT_DISCONN;
1682
1683 break;
1684 case BT_CONNECT:
1685 if (conn->type == LE_LINK) {
1686 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1687 break;
1688 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1689 0, NULL);
1690 } else if (conn->type == ACL_LINK) {
1691 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1692 break;
1693 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1694 6, &conn->dst);
1695 }
1696 break;
1697 case BT_CONNECT2:
1698 if (conn->type == ACL_LINK) {
1699 struct hci_cp_reject_conn_req rej;
1700
1701 bacpy(&rej.bdaddr, &conn->dst);
1702 rej.reason = reason;
1703
1704 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1705 sizeof(rej), &rej);
1706 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1707 struct hci_cp_reject_sync_conn_req rej;
1708
1709 bacpy(&rej.bdaddr, &conn->dst);
1710
1711 /* SCO rejection has its own limited set of
1712 * allowed error values (0x0D-0x0F) which isn't
1713 * compatible with most values passed to this
1714			 * function. To be safe, hard-code one of the
1715			 * values that is suitable for SCO.
1716 */
1717 rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1718
1719 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1720 sizeof(rej), &rej);
1721 }
1722 break;
1723 default:
1724 conn->state = BT_CLOSED;
1725 break;
1726 }
1727}
1728
1729static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1730{
1731 if (status)
1732 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1733}
1734
1735int hci_abort_conn(struct hci_conn *conn, u8 reason)
1736{
1737 struct hci_request req;
1738 int err;
1739
1740 hci_req_init(&req, conn->hdev);
1741
1742 __hci_abort_conn(&req, conn, reason);
1743
1744 err = hci_req_run(&req, abort_conn_complete);
1745 if (err && err != -ENODATA) {
1746 BT_ERR("Failed to run HCI request: err %d", err);
1747 return err;
1748 }
1749
1750 return 0;
1751}
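/* Example (hypothetical caller): abort a connection with the common
 * "remote user terminated connection" reason code:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */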
1752
1753static int update_bg_scan(struct hci_request *req, unsigned long opt)
1754{
1755 hci_dev_lock(req->hdev);
1756 __hci_update_background_scan(req);
1757 hci_dev_unlock(req->hdev);
1758 return 0;
1759}
1760
1761static void bg_scan_update(struct work_struct *work)
1762{
1763 struct hci_dev *hdev = container_of(work, struct hci_dev,
1764 bg_scan_update);
1765 struct hci_conn *conn;
1766 u8 status;
1767 int err;
1768
1769 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1770 if (!err)
1771 return;
1772
1773 hci_dev_lock(hdev);
1774
1775 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1776 if (conn)
1777 hci_le_conn_failed(conn, status);
1778
1779 hci_dev_unlock(hdev);
1780}
1781
1782static int le_scan_disable(struct hci_request *req, unsigned long opt)
1783{
1784 hci_req_add_le_scan_disable(req);
1785 return 0;
1786}
1787
1788static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1789{
1790 u8 length = opt;
1791 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1792 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1793 struct hci_cp_inquiry cp;
1794
1795 BT_DBG("%s", req->hdev->name);
1796
1797 hci_dev_lock(req->hdev);
1798 hci_inquiry_cache_flush(req->hdev);
1799 hci_dev_unlock(req->hdev);
1800
1801 memset(&cp, 0, sizeof(cp));
1802
1803 if (req->hdev->discovery.limited)
1804 memcpy(&cp.lap, liac, sizeof(cp.lap));
1805 else
1806 memcpy(&cp.lap, giac, sizeof(cp.lap));
1807
1808 cp.length = length;
1809
1810 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1811
1812 return 0;
1813}
1814
1815static void le_scan_disable_work(struct work_struct *work)
1816{
1817 struct hci_dev *hdev = container_of(work, struct hci_dev,
1818 le_scan_disable.work);
1819 u8 status;
1820
1821 BT_DBG("%s", hdev->name);
1822
1823 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1824 return;
1825
1826 cancel_delayed_work(&hdev->le_scan_restart);
1827
1828 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1829 if (status) {
1830 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1831 return;
1832 }
1833
1834 hdev->discovery.scan_start = 0;
1835
1836	/* If we were running an LE-only scan, change the discovery state.
1837	 * If we were running both LE and BR/EDR inquiry simultaneously,
1838	 * and BR/EDR inquiry is already finished, stop discovery;
1839	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
1840	 * If we are resolving a remote device name, do not change the
1841	 * discovery state.
1842 */
1843
1844 if (hdev->discovery.type == DISCOV_TYPE_LE)
1845 goto discov_stopped;
1846
1847 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1848 return;
1849
1850 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1851 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1852 hdev->discovery.state != DISCOVERY_RESOLVING)
1853 goto discov_stopped;
1854
1855 return;
1856 }
1857
1858 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1859 HCI_CMD_TIMEOUT, &status);
1860 if (status) {
1861 BT_ERR("Inquiry failed: status 0x%02x", status);
1862 goto discov_stopped;
1863 }
1864
1865 return;
1866
1867discov_stopped:
1868 hci_dev_lock(hdev);
1869 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1870 hci_dev_unlock(hdev);
1871}
1872
1873static int le_scan_restart(struct hci_request *req, unsigned long opt)
1874{
1875 struct hci_dev *hdev = req->hdev;
1876 struct hci_cp_le_set_scan_enable cp;
1877
1878	/* If the controller is not scanning, we are done. */
1879 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1880 return 0;
1881
1882 hci_req_add_le_scan_disable(req);
1883
1884 memset(&cp, 0, sizeof(cp));
1885 cp.enable = LE_SCAN_ENABLE;
1886 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1887 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1888
1889 return 0;
1890}
1891
1892static void le_scan_restart_work(struct work_struct *work)
1893{
1894 struct hci_dev *hdev = container_of(work, struct hci_dev,
1895 le_scan_restart.work);
1896 unsigned long timeout, duration, scan_start, now;
1897 u8 status;
1898
1899 BT_DBG("%s", hdev->name);
1900
1901 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1902 if (status) {
1903 BT_ERR("Failed to restart LE scan: status %d", status);
1904 return;
1905 }
1906
1907 hci_dev_lock(hdev);
1908
1909 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1910 !hdev->discovery.scan_start)
1911 goto unlock;
1912
1913	/* When the scan was started, the hdev->le_scan_disable work was
1914	 * queued to run 'duration' after scan_start. That work was canceled
1915	 * during the scan restart, so we need to queue it again with the
1916	 * proper timeout to make sure the scan does not run indefinitely.
1917 */
1918 duration = hdev->discovery.scan_duration;
1919 scan_start = hdev->discovery.scan_start;
1920 now = jiffies;
1921 if (now - scan_start <= duration) {
1922 int elapsed;
1923
1924 if (now >= scan_start)
1925 elapsed = now - scan_start;
1926 else
1927 elapsed = ULONG_MAX - scan_start + now;
1928
1929 timeout = duration - elapsed;
1930 } else {
1931 timeout = 0;
1932 }
1933
1934 queue_delayed_work(hdev->req_workqueue,
1935 &hdev->le_scan_disable, timeout);
1936
1937unlock:
1938 hci_dev_unlock(hdev);
1939}
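/* Worked example of the wraparound branch above (illustrative numbers):
 * if jiffies wrapped so that scan_start == ULONG_MAX - 100 and
 * now == 50, then elapsed evaluates to 100 + 50 = 150 ticks, which (to
 * within the one tick lost at the wrap itself) matches the real time
 * between the two samples.
 */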
1940
1941static void disable_advertising(struct hci_request *req)
1942{
1943 u8 enable = 0x00;
1944
1945 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1946}
1947
1948static int active_scan(struct hci_request *req, unsigned long opt)
1949{
1950 uint16_t interval = opt;
1951 struct hci_dev *hdev = req->hdev;
1952 struct hci_cp_le_set_scan_param param_cp;
1953 struct hci_cp_le_set_scan_enable enable_cp;
1954 u8 own_addr_type;
1955 int err;
1956
1957 BT_DBG("%s", hdev->name);
1958
1959 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1960 hci_dev_lock(hdev);
1961
1962 /* Don't let discovery abort an outgoing connection attempt
1963 * that's using directed advertising.
1964 */
1965 if (hci_lookup_le_connect(hdev)) {
1966 hci_dev_unlock(hdev);
1967 return -EBUSY;
1968 }
1969
1970 cancel_adv_timeout(hdev);
1971 hci_dev_unlock(hdev);
1972
1973 disable_advertising(req);
1974 }
1975
1976	/* If the controller is scanning, it means background scanning is
1977 * running. Thus, we should temporarily stop it in order to set the
1978 * discovery scanning parameters.
1979 */
1980 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1981 hci_req_add_le_scan_disable(req);
1982
1983 /* All active scans will be done with either a resolvable private
1984 * address (when privacy feature has been enabled) or non-resolvable
1985 * private address.
1986 */
1987 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
1988 &own_addr_type);
1989 if (err < 0)
1990 own_addr_type = ADDR_LE_DEV_PUBLIC;
1991
1992	memset(&param_cp, 0, sizeof(param_cp));
1993 param_cp.type = LE_SCAN_ACTIVE;
1994 param_cp.interval = cpu_to_le16(interval);
1995 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1996 param_cp.own_address_type = own_addr_type;
1997
1998 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1999		    &param_cp);
2000
2001 memset(&enable_cp, 0, sizeof(enable_cp));
2002 enable_cp.enable = LE_SCAN_ENABLE;
2003 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2004
2005 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2006 &enable_cp);
2007
2008 return 0;
2009}
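/* Note on units: the interval and window above are in 0.625 ms slots,
 * so with the usual DISCOV_LE_SCAN_INT/DISCOV_LE_SCAN_WIN defaults of
 * 0x0060 and 0x0030 (assumed here), the controller scans for 30 ms out
 * of every 60 ms while discovery is active.
 */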
2010
2011static int interleaved_discov(struct hci_request *req, unsigned long opt)
2012{
2013 int err;
2014
2015 BT_DBG("%s", req->hdev->name);
2016
2017 err = active_scan(req, opt);
2018 if (err)
2019 return err;
2020
2021 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2022}
2023
2024static void start_discovery(struct hci_dev *hdev, u8 *status)
2025{
2026 unsigned long timeout;
2027
2028 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2029
2030 switch (hdev->discovery.type) {
2031 case DISCOV_TYPE_BREDR:
2032 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2033 hci_req_sync(hdev, bredr_inquiry,
2034 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2035 status);
2036 return;
2037 case DISCOV_TYPE_INTERLEAVED:
2038 /* When running simultaneous discovery, the LE scanning time
2039		 * should occupy the whole discovery time since BR/EDR inquiry
2040 * and LE scanning are scheduled by the controller.
2041 *
2042		 * For interleaved discovery, in comparison, BR/EDR inquiry
2043 * and LE scanning are done sequentially with separate
2044 * timeouts.
2045 */
2046 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2047 &hdev->quirks)) {
2048 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2049			/* During simultaneous discovery, we double the LE scan
2050 * interval. We must leave some time for the controller
2051 * to do BR/EDR inquiry.
2052 */
2053 hci_req_sync(hdev, interleaved_discov,
2054 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2055 status);
2056 break;
2057 }
2058
2059 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2060 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2061 HCI_CMD_TIMEOUT, status);
2062 break;
2063 case DISCOV_TYPE_LE:
2064 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2065 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2066 HCI_CMD_TIMEOUT, status);
2067 break;
2068 default:
2069 *status = HCI_ERROR_UNSPECIFIED;
2070 return;
2071 }
2072
2073 if (*status)
2074 return;
2075
2076 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2077
2078 /* When service discovery is used and the controller has a
2079 * strict duplicate filter, it is important to remember the
2080 * start and duration of the scan. This is required for
2081 * restarting scanning during the discovery phase.
2082 */
2083 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2084 hdev->discovery.result_filtering) {
2085 hdev->discovery.scan_start = jiffies;
2086 hdev->discovery.scan_duration = timeout;
2087 }
2088
2089 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2090 timeout);
2091}
2092
2093bool hci_req_stop_discovery(struct hci_request *req)
2094{
2095 struct hci_dev *hdev = req->hdev;
2096 struct discovery_state *d = &hdev->discovery;
2097 struct hci_cp_remote_name_req_cancel cp;
2098 struct inquiry_entry *e;
2099 bool ret = false;
2100
2101 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2102
2103 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2104 if (test_bit(HCI_INQUIRY, &hdev->flags))
2105 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2106
2107 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2108 cancel_delayed_work(&hdev->le_scan_disable);
2109 hci_req_add_le_scan_disable(req);
2110 }
2111
2112 ret = true;
2113 } else {
2114 /* Passive scanning */
2115 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2116 hci_req_add_le_scan_disable(req);
2117 ret = true;
2118 }
2119 }
2120
2121 /* No further actions needed for LE-only discovery */
2122 if (d->type == DISCOV_TYPE_LE)
2123 return ret;
2124
2125 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2126 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2127 NAME_PENDING);
2128 if (!e)
2129 return ret;
2130
2131 bacpy(&cp.bdaddr, &e->data.bdaddr);
2132 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2133 &cp);
2134 ret = true;
2135 }
2136
2137 return ret;
2138}
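/* Example (hypothetical caller, locking elided): queue the
 * stop-discovery commands and only run the request when something was
 * actually queued:
 *
 *	hci_req_init(&req, hdev);
 *	if (hci_req_stop_discovery(&req))
 *		hci_req_run(&req, NULL);
 */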
2139
2140static int stop_discovery(struct hci_request *req, unsigned long opt)
2141{
2142 hci_dev_lock(req->hdev);
2143 hci_req_stop_discovery(req);
2144 hci_dev_unlock(req->hdev);
2145
2146 return 0;
2147}
2148
2149static void discov_update(struct work_struct *work)
2150{
2151 struct hci_dev *hdev = container_of(work, struct hci_dev,
2152 discov_update);
2153 u8 status = 0;
2154
2155 switch (hdev->discovery.state) {
2156 case DISCOVERY_STARTING:
2157 start_discovery(hdev, &status);
2158 mgmt_start_discovery_complete(hdev, status);
2159 if (status)
2160 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2161 else
2162 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2163 break;
2164 case DISCOVERY_STOPPING:
2165 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2166 mgmt_stop_discovery_complete(hdev, status);
2167 if (!status)
2168 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2169 break;
2170 case DISCOVERY_STOPPED:
2171 default:
2172 return;
2173 }
2174}
2175
2176static void discov_off(struct work_struct *work)
2177{
2178 struct hci_dev *hdev = container_of(work, struct hci_dev,
2179 discov_off.work);
2180
2181 BT_DBG("%s", hdev->name);
2182
2183 hci_dev_lock(hdev);
2184
2185	/* When the discoverable timeout triggers, just make sure
2186	 * the limited discoverable flag is cleared. Even in the case
2187	 * of a timeout triggered from general discoverable mode, it is
2188 * safe to unconditionally clear the flag.
2189 */
2190 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2191 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2192 hdev->discov_timeout = 0;
2193
2194 hci_dev_unlock(hdev);
2195
2196 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2197 mgmt_new_settings(hdev);
2198}
2199
2200static int powered_update_hci(struct hci_request *req, unsigned long opt)
2201{
2202 struct hci_dev *hdev = req->hdev;
2203 u8 link_sec;
2204
2205 hci_dev_lock(hdev);
2206
2207 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2208 !lmp_host_ssp_capable(hdev)) {
2209 u8 mode = 0x01;
2210
2211 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2212
2213 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2214 u8 support = 0x01;
2215
2216 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2217 sizeof(support), &support);
2218 }
2219 }
2220
2221 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2222 lmp_bredr_capable(hdev)) {
2223 struct hci_cp_write_le_host_supported cp;
2224
2225 cp.le = 0x01;
2226 cp.simul = 0x00;
2227
2228 /* Check first if we already have the right
2229 * host state (host features set)
2230 */
2231 if (cp.le != lmp_host_le_capable(hdev) ||
2232 cp.simul != lmp_host_le_br_capable(hdev))
2233 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2234 sizeof(cp), &cp);
2235 }
2236
2237 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2238 /* Make sure the controller has a good default for
2239 * advertising data. This also applies to the case
2240 * where BR/EDR was toggled during the AUTO_OFF phase.
2241 */
2242 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2243 list_empty(&hdev->adv_instances)) {
2244 __hci_req_update_adv_data(req, 0x00);
2245 __hci_req_update_scan_rsp_data(req, 0x00);
2246
2247 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2248 __hci_req_enable_advertising(req);
2249 } else if (!list_empty(&hdev->adv_instances)) {
2250 struct adv_info *adv_instance;
2251
2252 adv_instance = list_first_entry(&hdev->adv_instances,
2253 struct adv_info, list);
2254 __hci_req_schedule_adv_instance(req,
2255 adv_instance->instance,
2256 true);
2257 }
2258 }
2259
2260 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2261 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2262 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2263 sizeof(link_sec), &link_sec);
2264
2265 if (lmp_bredr_capable(hdev)) {
2266 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2267 __hci_req_write_fast_connectable(req, true);
2268 else
2269 __hci_req_write_fast_connectable(req, false);
2270 __hci_req_update_scan(req);
2271 __hci_req_update_class(req);
2272 __hci_req_update_name(req);
2273 __hci_req_update_eir(req);
2274 }
2275
2276 hci_dev_unlock(hdev);
2277 return 0;
2278}
2279
2280int __hci_req_hci_power_on(struct hci_dev *hdev)
2281{
2282 /* Register the available SMP channels (BR/EDR and LE) only when
2283 * successfully powering on the controller. This late
2284	 * registration is required so that LE SMP can clearly decide
2285	 * whether the public address or the static address is used.
2286 */
2287 smp_register(hdev);
2288
2289 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2290 NULL);
2291}
2292
2293void hci_request_setup(struct hci_dev *hdev)
2294{
2295 INIT_WORK(&hdev->discov_update, discov_update);
2296 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2297 INIT_WORK(&hdev->scan_update, scan_update_work);
2298 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2299 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2300 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2301 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2302 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2303 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2304}
2305
2306void hci_request_cancel_all(struct hci_dev *hdev)
2307{
2308 hci_req_sync_cancel(hdev, ENODEV);
2309
2310 cancel_work_sync(&hdev->discov_update);
2311 cancel_work_sync(&hdev->bg_scan_update);
2312 cancel_work_sync(&hdev->scan_update);
2313 cancel_work_sync(&hdev->connectable_update);
2314 cancel_work_sync(&hdev->discoverable_update);
2315 cancel_delayed_work_sync(&hdev->discov_off);
2316 cancel_delayed_work_sync(&hdev->le_scan_disable);
2317 cancel_delayed_work_sync(&hdev->le_scan_restart);
2318
2319 if (hdev->adv_instance_timeout) {
2320 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2321 hdev->adv_instance_timeout = 0;
2322 }
2323}