   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2014 Intel Corporation
   5
   6   This program is free software; you can redistribute it and/or modify
   7   it under the terms of the GNU General Public License version 2 as
   8   published by the Free Software Foundation;
   9
  10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  11   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  12   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  13   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  14   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  15   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  16   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  17   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  18
  19   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  20   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  21   SOFTWARE IS DISCLAIMED.
  22*/
  23
  24#include <asm/unaligned.h>
  25
  26#include <net/bluetooth/bluetooth.h>
  27#include <net/bluetooth/hci_core.h>
  28#include <net/bluetooth/mgmt.h>
  29
  30#include "smp.h"
  31#include "hci_request.h"
  32
  33#define HCI_REQ_DONE	  0
  34#define HCI_REQ_PEND	  1
  35#define HCI_REQ_CANCELED  2
  36
  37void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
  38{
  39	skb_queue_head_init(&req->cmd_q);
  40	req->hdev = hdev;
  41	req->err = 0;
  42}
  43
  44static int req_run(struct hci_request *req, hci_req_complete_t complete,
  45		   hci_req_complete_skb_t complete_skb)
  46{
  47	struct hci_dev *hdev = req->hdev;
  48	struct sk_buff *skb;
  49	unsigned long flags;
  50
  51	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
  52
  53	/* If an error occurred during request building, remove all HCI
  54	 * commands queued on the HCI request queue.
  55	 */
  56	if (req->err) {
  57		skb_queue_purge(&req->cmd_q);
  58		return req->err;
  59	}
  60
  61	/* Do not allow empty requests */
  62	if (skb_queue_empty(&req->cmd_q))
  63		return -ENODATA;
  64
  65	skb = skb_peek_tail(&req->cmd_q);
  66	if (complete) {
  67		bt_cb(skb)->hci.req_complete = complete;
  68	} else if (complete_skb) {
  69		bt_cb(skb)->hci.req_complete_skb = complete_skb;
  70		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
  71	}
  72
  73	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
  74	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
  75	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
  76
  77	queue_work(hdev->workqueue, &hdev->cmd_work);
  78
  79	return 0;
  80}
  81
  82int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
  83{
  84	return req_run(req, complete, NULL);
  85}
  86
  87int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
  88{
  89	return req_run(req, NULL, complete);
  90}
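/* Illustrative usage sketch, not part of the original file: the asynchronous
 * request API is used by queueing one or more commands and then running the
 * whole batch with an optional completion callback.  my_complete() below is
 * hypothetical; the opcode and SCAN_PAGE come from hci.h.
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_run(&req, my_complete);
 */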
  91
  92static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
  93				  struct sk_buff *skb)
  94{
  95	BT_DBG("%s result 0x%2.2x", hdev->name, result);
  96
  97	if (hdev->req_status == HCI_REQ_PEND) {
  98		hdev->req_result = result;
  99		hdev->req_status = HCI_REQ_DONE;
 100		if (skb)
 101			hdev->req_skb = skb_get(skb);
 102		wake_up_interruptible(&hdev->req_wait_q);
 103	}
 104}
 105
 106void hci_req_sync_cancel(struct hci_dev *hdev, int err)
 107{
 108	BT_DBG("%s err 0x%2.2x", hdev->name, err);
 109
 110	if (hdev->req_status == HCI_REQ_PEND) {
 111		hdev->req_result = err;
 112		hdev->req_status = HCI_REQ_CANCELED;
 113		wake_up_interruptible(&hdev->req_wait_q);
 114	}
 115}
 116
 117struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
 118				  const void *param, u8 event, u32 timeout)
 119{
 120	DECLARE_WAITQUEUE(wait, current);
 121	struct hci_request req;
 122	struct sk_buff *skb;
 123	int err = 0;
 124
 125	BT_DBG("%s", hdev->name);
 126
 127	hci_req_init(&req, hdev);
 128
 129	hci_req_add_ev(&req, opcode, plen, param, event);
 130
 131	hdev->req_status = HCI_REQ_PEND;
 132
 133	add_wait_queue(&hdev->req_wait_q, &wait);
 134	set_current_state(TASK_INTERRUPTIBLE);
 135
 136	err = hci_req_run_skb(&req, hci_req_sync_complete);
 137	if (err < 0) {
 138		remove_wait_queue(&hdev->req_wait_q, &wait);
 139		set_current_state(TASK_RUNNING);
 140		return ERR_PTR(err);
 141	}
 142
 143	schedule_timeout(timeout);
 144
 145	remove_wait_queue(&hdev->req_wait_q, &wait);
 146
 147	if (signal_pending(current))
 148		return ERR_PTR(-EINTR);
 149
 150	switch (hdev->req_status) {
 151	case HCI_REQ_DONE:
 152		err = -bt_to_errno(hdev->req_result);
 153		break;
 154
 155	case HCI_REQ_CANCELED:
 156		err = -hdev->req_result;
 157		break;
 158
 159	default:
 160		err = -ETIMEDOUT;
 161		break;
 162	}
 163
 164	hdev->req_status = hdev->req_result = 0;
 165	skb = hdev->req_skb;
 166	hdev->req_skb = NULL;
 167
 168	BT_DBG("%s end: err %d", hdev->name, err);
 169
 170	if (err < 0) {
 171		kfree_skb(skb);
 172		return ERR_PTR(err);
 173	}
 174
 175	if (!skb)
 176		return ERR_PTR(-ENODATA);
 177
 178	return skb;
 179}
 180EXPORT_SYMBOL(__hci_cmd_sync_ev);
 181
 182struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
 183			       const void *param, u32 timeout)
 184{
 185	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
 186}
 187EXPORT_SYMBOL(__hci_cmd_sync);
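/* Illustrative usage sketch, not part of the original file: __hci_cmd_sync()
 * sends a single command and blocks until the matching completion event
 * arrives; it is normally called while the request sync lock is held, e.g.
 * from a driver setup callback.  Minimal error handling only.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	// skb->data holds struct hci_rp_read_local_version
 *	kfree_skb(skb);
 */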
 188
 189/* Execute request and wait for completion. */
 190int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
 191						     unsigned long opt),
 192		   unsigned long opt, u32 timeout, u8 *hci_status)
 193{
 194	struct hci_request req;
 195	DECLARE_WAITQUEUE(wait, current);
 196	int err = 0;
 197
 198	BT_DBG("%s start", hdev->name);
 199
 200	hci_req_init(&req, hdev);
 201
 202	hdev->req_status = HCI_REQ_PEND;
 203
 204	err = func(&req, opt);
 205	if (err) {
 206		if (hci_status)
 207			*hci_status = HCI_ERROR_UNSPECIFIED;
 208		return err;
 209	}
 210
 211	add_wait_queue(&hdev->req_wait_q, &wait);
 212	set_current_state(TASK_INTERRUPTIBLE);
 213
 214	err = hci_req_run_skb(&req, hci_req_sync_complete);
 215	if (err < 0) {
 216		hdev->req_status = 0;
 217
 218		remove_wait_queue(&hdev->req_wait_q, &wait);
 219		set_current_state(TASK_RUNNING);
 220
 221		/* ENODATA means the HCI request command queue is empty.
 222		 * This can happen when a request with conditionals doesn't
 223		 * trigger any commands to be sent. This is normal behavior
 224		 * and should not trigger an error return.
 225		 */
 226		if (err == -ENODATA) {
 227			if (hci_status)
 228				*hci_status = 0;
 229			return 0;
 230		}
 231
 232		if (hci_status)
 233			*hci_status = HCI_ERROR_UNSPECIFIED;
 234
 235		return err;
 236	}
 237
 238	schedule_timeout(timeout);
 239
 240	remove_wait_queue(&hdev->req_wait_q, &wait);
 241
 242	if (signal_pending(current))
 243		return -EINTR;
 244
 245	switch (hdev->req_status) {
 246	case HCI_REQ_DONE:
 247		err = -bt_to_errno(hdev->req_result);
 248		if (hci_status)
 249			*hci_status = hdev->req_result;
 250		break;
 251
 252	case HCI_REQ_CANCELED:
 253		err = -hdev->req_result;
 254		if (hci_status)
 255			*hci_status = HCI_ERROR_UNSPECIFIED;
 256		break;
 257
 258	default:
 259		err = -ETIMEDOUT;
 260		if (hci_status)
 261			*hci_status = HCI_ERROR_UNSPECIFIED;
 262		break;
 263	}
 264
 265	hdev->req_status = hdev->req_result = 0;
 266
 267	BT_DBG("%s end: err %d", hdev->name, err);
 268
 269	return err;
 270}
 271
 272int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
 273						  unsigned long opt),
 274		 unsigned long opt, u32 timeout, u8 *hci_status)
 275{
 276	int ret;
 277
 278	if (!test_bit(HCI_UP, &hdev->flags))
 279		return -ENETDOWN;
 280
 281	/* Serialize all requests */
 282	hci_req_sync_lock(hdev);
 283	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
 284	hci_req_sync_unlock(hdev);
 285
 286	return ret;
 287}
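/* Illustrative sketch, not part of the original file: hci_req_sync() takes a
 * request-builder callback that only queues commands; they are sent and
 * waited for after the callback returns.  A minimal, hypothetical caller
 * could look like this:
 *
 *	static int read_voice_setting(struct hci_request *req,
 *				      unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, read_voice_setting, 0, HCI_CMD_TIMEOUT,
 *			   &status);
 */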
 288
 289struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
 290				const void *param)
 291{
 292	int len = HCI_COMMAND_HDR_SIZE + plen;
 293	struct hci_command_hdr *hdr;
 294	struct sk_buff *skb;
 295
 296	skb = bt_skb_alloc(len, GFP_ATOMIC);
 297	if (!skb)
 298		return NULL;
 299
 300	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
 301	hdr->opcode = cpu_to_le16(opcode);
 302	hdr->plen   = plen;
 303
 304	if (plen)
 305		memcpy(skb_put(skb, plen), param, plen);
 306
 307	BT_DBG("skb len %d", skb->len);
 308
 309	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
 310	hci_skb_opcode(skb) = opcode;
 311
 312	return skb;
 313}
 314
 315/* Queue a command to an asynchronous HCI request */
 316void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
 317		    const void *param, u8 event)
 318{
 319	struct hci_dev *hdev = req->hdev;
 320	struct sk_buff *skb;
 321
 322	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
 323
 324	/* If an error occurred during request building, there is no point in
 325	 * queueing the HCI command. We can simply return.
 326	 */
 327	if (req->err)
 328		return;
 329
 330	skb = hci_prepare_cmd(hdev, opcode, plen, param);
 331	if (!skb) {
 332		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
 333		       hdev->name, opcode);
 334		req->err = -ENOMEM;
 335		return;
 336	}
 337
 338	if (skb_queue_empty(&req->cmd_q))
 339		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
 340
 341	bt_cb(skb)->hci.req_event = event;
 342
 343	skb_queue_tail(&req->cmd_q, skb);
 344}
 345
 346void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
 347		 const void *param)
 348{
 349	hci_req_add_ev(req, opcode, plen, param, 0);
 350}
 351
 352void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
 353{
 354	struct hci_dev *hdev = req->hdev;
 355	struct hci_cp_write_page_scan_activity acp;
 356	u8 type;
 357
 358	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 359		return;
 360
 361	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 362		return;
 363
 364	if (enable) {
 365		type = PAGE_SCAN_TYPE_INTERLACED;
 366
 367		/* 160 msec page scan interval */
 368		acp.interval = cpu_to_le16(0x0100);
 369	} else {
 370		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
 371
 372		/* default 1.28 sec page scan */
 373		acp.interval = cpu_to_le16(0x0800);
 374	}
 375
 376	acp.window = cpu_to_le16(0x0012);
 377
 378	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
 379	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
 380		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
 381			    sizeof(acp), &acp);
 382
 383	if (hdev->page_scan_type != type)
 384		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 385}
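/* For reference: the page scan values above are expressed in baseband slots
 * of 0.625 ms, so interval 0x0100 = 256 * 0.625 ms = 160 ms, interval
 * 0x0800 = 2048 * 0.625 ms = 1.28 s, and window 0x0012 = 18 * 0.625 ms =
 * 11.25 ms.
 */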
 386
 387/* This function controls the background scanning based on hdev->pend_le_conns
  388 * list. If there are pending LE connections we start the background scanning,
 389 * otherwise we stop it.
 390 *
 391 * This function requires the caller holds hdev->lock.
 392 */
 393static void __hci_update_background_scan(struct hci_request *req)
 394{
 395	struct hci_dev *hdev = req->hdev;
 396
 397	if (!test_bit(HCI_UP, &hdev->flags) ||
 398	    test_bit(HCI_INIT, &hdev->flags) ||
 399	    hci_dev_test_flag(hdev, HCI_SETUP) ||
 400	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 401	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
 402	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 403		return;
 404
 405	/* No point in doing scanning if LE support hasn't been enabled */
 406	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 407		return;
 408
 409	/* If discovery is active don't interfere with it */
 410	if (hdev->discovery.state != DISCOVERY_STOPPED)
 411		return;
 412
 413	/* Reset RSSI and UUID filters when starting background scanning
 414	 * since these filters are meant for service discovery only.
 415	 *
 416	 * The Start Discovery and Start Service Discovery operations
  417	 * ensure that proper values are set for the RSSI threshold and
  418	 * UUID filter list. So it is safe to just reset them here.
 419	 */
 420	hci_discovery_filter_clear(hdev);
 421
 422	if (list_empty(&hdev->pend_le_conns) &&
 423	    list_empty(&hdev->pend_le_reports)) {
  424		/* If there are no pending LE connections or devices
 425		 * to be scanned for, we should stop the background
 426		 * scanning.
 427		 */
 428
 429		/* If controller is not scanning we are done. */
 430		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
 431			return;
 432
 433		hci_req_add_le_scan_disable(req);
 434
 435		BT_DBG("%s stopping background scanning", hdev->name);
 436	} else {
 437		/* If there is at least one pending LE connection, we should
 438		 * keep the background scan running.
 439		 */
 440
 441		/* If controller is connecting, we should not start scanning
 442		 * since some controllers are not able to scan and connect at
 443		 * the same time.
 444		 */
 445		if (hci_lookup_le_connect(hdev))
 446			return;
 447
 448		/* If controller is currently scanning, we stop it to ensure we
 449		 * don't miss any advertising (due to duplicates filter).
 450		 */
 451		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 452			hci_req_add_le_scan_disable(req);
 453
 454		hci_req_add_le_passive_scan(req);
 455
 456		BT_DBG("%s starting background scanning", hdev->name);
 457	}
 458}
 459
 460void __hci_req_update_name(struct hci_request *req)
 461{
 462	struct hci_dev *hdev = req->hdev;
 463	struct hci_cp_write_local_name cp;
 464
 465	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
 466
 467	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 468}
 469
 470#define PNP_INFO_SVCLASS_ID		0x1200
 471
 472static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 473{
 474	u8 *ptr = data, *uuids_start = NULL;
 475	struct bt_uuid *uuid;
 476
 477	if (len < 4)
 478		return ptr;
 479
 480	list_for_each_entry(uuid, &hdev->uuids, list) {
 481		u16 uuid16;
 482
 483		if (uuid->size != 16)
 484			continue;
 485
 486		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
 487		if (uuid16 < 0x1100)
 488			continue;
 489
 490		if (uuid16 == PNP_INFO_SVCLASS_ID)
 491			continue;
 492
 493		if (!uuids_start) {
 494			uuids_start = ptr;
 495			uuids_start[0] = 1;
 496			uuids_start[1] = EIR_UUID16_ALL;
 497			ptr += 2;
 498		}
 499
 500		/* Stop if not enough space to put next UUID */
 501		if ((ptr - data) + sizeof(u16) > len) {
 502			uuids_start[1] = EIR_UUID16_SOME;
 503			break;
 504		}
 505
 506		*ptr++ = (uuid16 & 0x00ff);
 507		*ptr++ = (uuid16 & 0xff00) >> 8;
 508		uuids_start[0] += sizeof(uuid16);
 509	}
 510
 511	return ptr;
 512}
 513
 514static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 515{
 516	u8 *ptr = data, *uuids_start = NULL;
 517	struct bt_uuid *uuid;
 518
 519	if (len < 6)
 520		return ptr;
 521
 522	list_for_each_entry(uuid, &hdev->uuids, list) {
 523		if (uuid->size != 32)
 524			continue;
 525
 526		if (!uuids_start) {
 527			uuids_start = ptr;
 528			uuids_start[0] = 1;
 529			uuids_start[1] = EIR_UUID32_ALL;
 530			ptr += 2;
 531		}
 532
 533		/* Stop if not enough space to put next UUID */
 534		if ((ptr - data) + sizeof(u32) > len) {
 535			uuids_start[1] = EIR_UUID32_SOME;
 536			break;
 537		}
 538
 539		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
 540		ptr += sizeof(u32);
 541		uuids_start[0] += sizeof(u32);
 542	}
 543
 544	return ptr;
 545}
 546
 547static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 548{
 549	u8 *ptr = data, *uuids_start = NULL;
 550	struct bt_uuid *uuid;
 551
 552	if (len < 18)
 553		return ptr;
 554
 555	list_for_each_entry(uuid, &hdev->uuids, list) {
 556		if (uuid->size != 128)
 557			continue;
 558
 559		if (!uuids_start) {
 560			uuids_start = ptr;
 561			uuids_start[0] = 1;
 562			uuids_start[1] = EIR_UUID128_ALL;
 563			ptr += 2;
 564		}
 565
 566		/* Stop if not enough space to put next UUID */
 567		if ((ptr - data) + 16 > len) {
 568			uuids_start[1] = EIR_UUID128_SOME;
 569			break;
 570		}
 571
 572		memcpy(ptr, uuid->uuid, 16);
 573		ptr += 16;
 574		uuids_start[0] += 16;
 575	}
 576
 577	return ptr;
 578}
 579
 580static void create_eir(struct hci_dev *hdev, u8 *data)
 581{
 582	u8 *ptr = data;
 583	size_t name_len;
 584
 585	name_len = strlen(hdev->dev_name);
 586
 587	if (name_len > 0) {
 588		/* EIR Data type */
 589		if (name_len > 48) {
 590			name_len = 48;
 591			ptr[1] = EIR_NAME_SHORT;
 592		} else
 593			ptr[1] = EIR_NAME_COMPLETE;
 594
 595		/* EIR Data length */
 596		ptr[0] = name_len + 1;
 597
 598		memcpy(ptr + 2, hdev->dev_name, name_len);
 599
 600		ptr += (name_len + 2);
 601	}
 602
 603	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
 604		ptr[0] = 2;
 605		ptr[1] = EIR_TX_POWER;
 606		ptr[2] = (u8) hdev->inq_tx_power;
 607
 608		ptr += 3;
 609	}
 610
 611	if (hdev->devid_source > 0) {
 612		ptr[0] = 9;
 613		ptr[1] = EIR_DEVICE_ID;
 614
 615		put_unaligned_le16(hdev->devid_source, ptr + 2);
 616		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
 617		put_unaligned_le16(hdev->devid_product, ptr + 6);
 618		put_unaligned_le16(hdev->devid_version, ptr + 8);
 619
 620		ptr += 10;
 621	}
 622
 623	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 624	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 625	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 626}
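/* For reference: every EIR structure emitted above follows the standard
 * [length][type][data...] layout, where the length byte counts the type
 * byte plus the data bytes; e.g. the TX power entry is the three bytes
 * 0x02, EIR_TX_POWER, <power level>.
 */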
 627
 628void __hci_req_update_eir(struct hci_request *req)
 629{
 630	struct hci_dev *hdev = req->hdev;
 631	struct hci_cp_write_eir cp;
 632
 633	if (!hdev_is_powered(hdev))
 634		return;
 635
 636	if (!lmp_ext_inq_capable(hdev))
 637		return;
 638
 639	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
 640		return;
 641
 642	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
 643		return;
 644
 645	memset(&cp, 0, sizeof(cp));
 646
 647	create_eir(hdev, cp.data);
 648
 649	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
 650		return;
 651
 652	memcpy(hdev->eir, cp.data, sizeof(cp.data));
 653
 654	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 655}
 656
 657void hci_req_add_le_scan_disable(struct hci_request *req)
 658{
 659	struct hci_cp_le_set_scan_enable cp;
 660
 661	memset(&cp, 0, sizeof(cp));
 662	cp.enable = LE_SCAN_DISABLE;
 663	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 664}
 665
 666static void add_to_white_list(struct hci_request *req,
 667			      struct hci_conn_params *params)
 668{
 669	struct hci_cp_le_add_to_white_list cp;
 670
 671	cp.bdaddr_type = params->addr_type;
 672	bacpy(&cp.bdaddr, &params->addr);
 673
 674	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
 675}
 676
 677static u8 update_white_list(struct hci_request *req)
 678{
 679	struct hci_dev *hdev = req->hdev;
 680	struct hci_conn_params *params;
 681	struct bdaddr_list *b;
 682	uint8_t white_list_entries = 0;
 683
 684	/* Go through the current white list programmed into the
 685	 * controller one by one and check if that address is still
 686	 * in the list of pending connections or list of devices to
 687	 * report. If not present in either list, then queue the
 688	 * command to remove it from the controller.
 689	 */
 690	list_for_each_entry(b, &hdev->le_white_list, list) {
 691		/* If the device is neither in pend_le_conns nor
 692		 * pend_le_reports then remove it from the whitelist.
 693		 */
 694		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
 695					       &b->bdaddr, b->bdaddr_type) &&
 696		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
 697					       &b->bdaddr, b->bdaddr_type)) {
 698			struct hci_cp_le_del_from_white_list cp;
 699
 700			cp.bdaddr_type = b->bdaddr_type;
 701			bacpy(&cp.bdaddr, &b->bdaddr);
 702
 703			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
 704				    sizeof(cp), &cp);
 705			continue;
 706		}
 707
 708		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
 709			/* White list can not be used with RPAs */
 710			return 0x00;
 711		}
 712
 713		white_list_entries++;
 714	}
 715
  716	/* Since all white list entries that are no longer valid have
  717	 * been removed, walk through the list of pending connections
 718	 * and ensure that any new device gets programmed into
 719	 * the controller.
 720	 *
  721	 * If the list of devices is larger than the number of
  722	 * available white list entries in the controller, then
  723	 * just abort and return a filter policy value that does not
  724	 * use the white list.
 725	 */
 726	list_for_each_entry(params, &hdev->pend_le_conns, action) {
 727		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
 728					   &params->addr, params->addr_type))
 729			continue;
 730
 731		if (white_list_entries >= hdev->le_white_list_size) {
 732			/* Select filter policy to accept all advertising */
 733			return 0x00;
 734		}
 735
 736		if (hci_find_irk_by_addr(hdev, &params->addr,
 737					 params->addr_type)) {
 738			/* White list can not be used with RPAs */
 739			return 0x00;
 740		}
 741
 742		white_list_entries++;
 743		add_to_white_list(req, params);
 744	}
 745
 746	/* After adding all new pending connections, walk through
 747	 * the list of pending reports and also add these to the
 748	 * white list if there is still space.
 749	 */
 750	list_for_each_entry(params, &hdev->pend_le_reports, action) {
 751		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
 752					   &params->addr, params->addr_type))
 753			continue;
 754
 755		if (white_list_entries >= hdev->le_white_list_size) {
 756			/* Select filter policy to accept all advertising */
 757			return 0x00;
 758		}
 759
 760		if (hci_find_irk_by_addr(hdev, &params->addr,
 761					 params->addr_type)) {
 762			/* White list can not be used with RPAs */
 763			return 0x00;
 764		}
 765
 766		white_list_entries++;
 767		add_to_white_list(req, params);
 768	}
 769
 770	/* Select filter policy to use white list */
 771	return 0x01;
 772}
 773
 774static bool scan_use_rpa(struct hci_dev *hdev)
 775{
 776	return hci_dev_test_flag(hdev, HCI_PRIVACY);
 777}
 778
 779void hci_req_add_le_passive_scan(struct hci_request *req)
 780{
 781	struct hci_cp_le_set_scan_param param_cp;
 782	struct hci_cp_le_set_scan_enable enable_cp;
 783	struct hci_dev *hdev = req->hdev;
 784	u8 own_addr_type;
 785	u8 filter_policy;
 786
  787	/* Set require_privacy to false since no SCAN_REQ are sent
  788	 * during passive scanning. Not using a non-resolvable address
 789	 * here is important so that peer devices using direct
 790	 * advertising with our address will be correctly reported
 791	 * by the controller.
 792	 */
 793	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
 794				      &own_addr_type))
 795		return;
 796
 797	/* Adding or removing entries from the white list must
 798	 * happen before enabling scanning. The controller does
 799	 * not allow white list modification while scanning.
 800	 */
 801	filter_policy = update_white_list(req);
 802
  803	/* When the controller is using random resolvable addresses and
  804	 * thus has LE privacy enabled, controllers with Extended Scanner
  805	 * Filter Policies support can additionally handle directed
  806	 * advertising.
  807	 *
  808	 * So instead of using filter policies 0x00 (no whitelist)
 809	 * and 0x01 (whitelist enabled) use the new filter policies
 810	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
 811	 */
 812	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
 813	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
 814		filter_policy |= 0x02;
 815
 816	memset(&param_cp, 0, sizeof(param_cp));
 817	param_cp.type = LE_SCAN_PASSIVE;
 818	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
 819	param_cp.window = cpu_to_le16(hdev->le_scan_window);
 820	param_cp.own_address_type = own_addr_type;
 821	param_cp.filter_policy = filter_policy;
 822	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
 823		    &param_cp);
 824
 825	memset(&enable_cp, 0, sizeof(enable_cp));
 826	enable_cp.enable = LE_SCAN_ENABLE;
 827	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
 828	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
 829		    &enable_cp);
 830}
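/* For reference: the filter_policy values used above map to the LE Set Scan
 * Parameters filter policies: 0x00 accepts all advertising, 0x01 accepts
 * only white list entries, and 0x02/0x03 are the extended variants that
 * additionally accept directed advertising addressed to a resolvable
 * private address.
 */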
 831
 832static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
 833{
 834	u8 instance = hdev->cur_adv_instance;
 835	struct adv_info *adv_instance;
 836
 837	/* Ignore instance 0 */
 838	if (instance == 0x00)
 839		return 0;
 840
 841	adv_instance = hci_find_adv_instance(hdev, instance);
 842	if (!adv_instance)
 843		return 0;
 844
 845	/* TODO: Take into account the "appearance" and "local-name" flags here.
 846	 * These are currently being ignored as they are not supported.
 847	 */
 848	return adv_instance->scan_rsp_len;
 849}
 850
 851void __hci_req_disable_advertising(struct hci_request *req)
 852{
 853	u8 enable = 0x00;
 854
 855	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 856}
 857
 858static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
 859{
 860	u32 flags;
 861	struct adv_info *adv_instance;
 862
 863	if (instance == 0x00) {
 864		/* Instance 0 always manages the "Tx Power" and "Flags"
 865		 * fields
 866		 */
 867		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
 868
 869		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
 870		 * corresponds to the "connectable" instance flag.
 871		 */
 872		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
 873			flags |= MGMT_ADV_FLAG_CONNECTABLE;
 874
 875		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
 876			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
 877		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
 878			flags |= MGMT_ADV_FLAG_DISCOV;
 879
 880		return flags;
 881	}
 882
 883	adv_instance = hci_find_adv_instance(hdev, instance);
 884
 885	/* Return 0 when we got an invalid instance identifier. */
 886	if (!adv_instance)
 887		return 0;
 888
 889	return adv_instance->flags;
 890}
 891
 892static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
 893{
 894	/* If privacy is not enabled don't use RPA */
 895	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
 896		return false;
 897
 898	/* If basic privacy mode is enabled use RPA */
 899	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
 900		return true;
 901
 902	/* If limited privacy mode is enabled don't use RPA if we're
 903	 * both discoverable and bondable.
 904	 */
 905	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
 906	    hci_dev_test_flag(hdev, HCI_BONDABLE))
 907		return false;
 908
 909	/* We're neither bondable nor discoverable in the limited
 910	 * privacy mode, therefore use RPA.
 911	 */
 912	return true;
 913}
 914
 915void __hci_req_enable_advertising(struct hci_request *req)
 916{
 917	struct hci_dev *hdev = req->hdev;
 918	struct hci_cp_le_set_adv_param cp;
 919	u8 own_addr_type, enable = 0x01;
 920	bool connectable;
 921	u32 flags;
 922
 923	if (hci_conn_num(hdev, LE_LINK) > 0)
 924		return;
 925
 926	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
 927		__hci_req_disable_advertising(req);
 928
  929	/* Clear the HCI_LE_ADV bit temporarily so that
  930	 * hci_update_random_address() knows that it's safe to go ahead
 931	 * and write a new random address. The flag will be set back on
 932	 * as soon as the SET_ADV_ENABLE HCI command completes.
 933	 */
 934	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 935
 936	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
 937
 938	/* If the "connectable" instance flag was not set, then choose between
 939	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
 940	 */
 941	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
 942		      mgmt_get_connectable(hdev);
 943
 944	/* Set require_privacy to true only when non-connectable
 945	 * advertising is used. In that case it is fine to use a
 946	 * non-resolvable private address.
 947	 */
 948	if (hci_update_random_address(req, !connectable,
 949				      adv_use_rpa(hdev, flags),
 950				      &own_addr_type) < 0)
 951		return;
 952
 953	memset(&cp, 0, sizeof(cp));
 954	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
 955	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
 956
 957	if (connectable)
 958		cp.type = LE_ADV_IND;
 959	else if (get_cur_adv_instance_scan_rsp_len(hdev))
 960		cp.type = LE_ADV_SCAN_IND;
 961	else
 962		cp.type = LE_ADV_NONCONN_IND;
 963
 964	cp.own_address_type = own_addr_type;
 965	cp.channel_map = hdev->le_adv_channel_map;
 966
 967	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
 968
 969	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 970}
 971
 972static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
 973{
 974	u8 ad_len = 0;
 975	size_t name_len;
 976
 977	name_len = strlen(hdev->dev_name);
 978	if (name_len > 0) {
 979		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
 980
 981		if (name_len > max_len) {
 982			name_len = max_len;
 983			ptr[1] = EIR_NAME_SHORT;
 984		} else
 985			ptr[1] = EIR_NAME_COMPLETE;
 986
 987		ptr[0] = name_len + 1;
 988
 989		memcpy(ptr + 2, hdev->dev_name, name_len);
 990
 991		ad_len += (name_len + 2);
 992		ptr += (name_len + 2);
 993	}
 994
 995	return ad_len;
 996}
 997
 998static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
 999					u8 *ptr)
1000{
1001	struct adv_info *adv_instance;
1002
1003	adv_instance = hci_find_adv_instance(hdev, instance);
1004	if (!adv_instance)
1005		return 0;
1006
1007	/* TODO: Set the appropriate entries based on advertising instance flags
1008	 * here once flags other than 0 are supported.
1009	 */
1010	memcpy(ptr, adv_instance->scan_rsp_data,
1011	       adv_instance->scan_rsp_len);
1012
1013	return adv_instance->scan_rsp_len;
1014}
1015
1016void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1017{
1018	struct hci_dev *hdev = req->hdev;
1019	struct hci_cp_le_set_scan_rsp_data cp;
1020	u8 len;
1021
1022	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1023		return;
1024
1025	memset(&cp, 0, sizeof(cp));
1026
1027	if (instance)
1028		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1029	else
1030		len = create_default_scan_rsp_data(hdev, cp.data);
1031
1032	if (hdev->scan_rsp_data_len == len &&
1033	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1034		return;
1035
1036	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1037	hdev->scan_rsp_data_len = len;
1038
1039	cp.length = len;
1040
1041	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1042}
1043
1044static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1045{
1046	struct adv_info *adv_instance = NULL;
1047	u8 ad_len = 0, flags = 0;
1048	u32 instance_flags;
1049
1050	/* Return 0 when the current instance identifier is invalid. */
1051	if (instance) {
1052		adv_instance = hci_find_adv_instance(hdev, instance);
1053		if (!adv_instance)
1054			return 0;
1055	}
1056
1057	instance_flags = get_adv_instance_flags(hdev, instance);
1058
1059	/* The Add Advertising command allows userspace to set both the general
1060	 * and limited discoverable flags.
1061	 */
1062	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1063		flags |= LE_AD_GENERAL;
1064
1065	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1066		flags |= LE_AD_LIMITED;
1067
1068	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1069		/* If a discovery flag wasn't provided, simply use the global
1070		 * settings.
1071		 */
1072		if (!flags)
1073			flags |= mgmt_get_adv_discov_flags(hdev);
1074
1075		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1076			flags |= LE_AD_NO_BREDR;
1077
1078		/* If flags would still be empty, then there is no need to
 1079		 * include the "Flags" AD field.
1080		 */
1081		if (flags) {
1082			ptr[0] = 0x02;
1083			ptr[1] = EIR_FLAGS;
1084			ptr[2] = flags;
1085
1086			ad_len += 3;
1087			ptr += 3;
1088		}
1089	}
1090
1091	if (adv_instance) {
1092		memcpy(ptr, adv_instance->adv_data,
1093		       adv_instance->adv_data_len);
1094		ad_len += adv_instance->adv_data_len;
1095		ptr += adv_instance->adv_data_len;
1096	}
1097
1098	/* Provide Tx Power only if we can provide a valid value for it */
1099	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1100	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1101		ptr[0] = 0x02;
1102		ptr[1] = EIR_TX_POWER;
1103		ptr[2] = (u8)hdev->adv_tx_power;
1104
1105		ad_len += 3;
1106		ptr += 3;
1107	}
1108
1109	return ad_len;
1110}
1111
1112void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1113{
1114	struct hci_dev *hdev = req->hdev;
1115	struct hci_cp_le_set_adv_data cp;
1116	u8 len;
1117
1118	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1119		return;
1120
1121	memset(&cp, 0, sizeof(cp));
1122
1123	len = create_instance_adv_data(hdev, instance, cp.data);
1124
1125	/* There's nothing to do if the data hasn't changed */
1126	if (hdev->adv_data_len == len &&
1127	    memcmp(cp.data, hdev->adv_data, len) == 0)
1128		return;
1129
1130	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1131	hdev->adv_data_len = len;
1132
1133	cp.length = len;
1134
1135	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1136}
1137
1138int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1139{
1140	struct hci_request req;
1141
1142	hci_req_init(&req, hdev);
1143	__hci_req_update_adv_data(&req, instance);
1144
1145	return hci_req_run(&req, NULL);
1146}
1147
1148static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1149{
1150	BT_DBG("%s status %u", hdev->name, status);
1151}
1152
1153void hci_req_reenable_advertising(struct hci_dev *hdev)
1154{
1155	struct hci_request req;
1156
1157	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1158	    list_empty(&hdev->adv_instances))
1159		return;
1160
1161	hci_req_init(&req, hdev);
1162
1163	if (hdev->cur_adv_instance) {
1164		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1165						true);
1166	} else {
1167		__hci_req_update_adv_data(&req, 0x00);
1168		__hci_req_update_scan_rsp_data(&req, 0x00);
1169		__hci_req_enable_advertising(&req);
1170	}
1171
1172	hci_req_run(&req, adv_enable_complete);
1173}
1174
1175static void adv_timeout_expire(struct work_struct *work)
1176{
1177	struct hci_dev *hdev = container_of(work, struct hci_dev,
1178					    adv_instance_expire.work);
1179
1180	struct hci_request req;
1181	u8 instance;
1182
1183	BT_DBG("%s", hdev->name);
1184
1185	hci_dev_lock(hdev);
1186
1187	hdev->adv_instance_timeout = 0;
1188
1189	instance = hdev->cur_adv_instance;
1190	if (instance == 0x00)
1191		goto unlock;
1192
1193	hci_req_init(&req, hdev);
1194
1195	hci_req_clear_adv_instance(hdev, &req, instance, false);
1196
1197	if (list_empty(&hdev->adv_instances))
1198		__hci_req_disable_advertising(&req);
1199
1200	hci_req_run(&req, NULL);
1201
1202unlock:
1203	hci_dev_unlock(hdev);
1204}
1205
1206int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1207				    bool force)
1208{
1209	struct hci_dev *hdev = req->hdev;
1210	struct adv_info *adv_instance = NULL;
1211	u16 timeout;
1212
1213	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1214	    list_empty(&hdev->adv_instances))
1215		return -EPERM;
1216
1217	if (hdev->adv_instance_timeout)
1218		return -EBUSY;
1219
1220	adv_instance = hci_find_adv_instance(hdev, instance);
1221	if (!adv_instance)
1222		return -ENOENT;
1223
1224	/* A zero timeout means unlimited advertising. As long as there is
1225	 * only one instance, duration should be ignored. We still set a timeout
1226	 * in case further instances are being added later on.
1227	 *
1228	 * If the remaining lifetime of the instance is more than the duration
1229	 * then the timeout corresponds to the duration, otherwise it will be
1230	 * reduced to the remaining instance lifetime.
1231	 */
1232	if (adv_instance->timeout == 0 ||
1233	    adv_instance->duration <= adv_instance->remaining_time)
1234		timeout = adv_instance->duration;
1235	else
1236		timeout = adv_instance->remaining_time;
1237
1238	/* The remaining time is being reduced unless the instance is being
1239	 * advertised without time limit.
1240	 */
1241	if (adv_instance->timeout)
1242		adv_instance->remaining_time =
1243				adv_instance->remaining_time - timeout;
1244
1245	hdev->adv_instance_timeout = timeout;
1246	queue_delayed_work(hdev->req_workqueue,
1247			   &hdev->adv_instance_expire,
1248			   msecs_to_jiffies(timeout * 1000));
1249
1250	/* If we're just re-scheduling the same instance again then do not
1251	 * execute any HCI commands. This happens when a single instance is
1252	 * being advertised.
1253	 */
1254	if (!force && hdev->cur_adv_instance == instance &&
1255	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1256		return 0;
1257
1258	hdev->cur_adv_instance = instance;
1259	__hci_req_update_adv_data(req, instance);
1260	__hci_req_update_scan_rsp_data(req, instance);
1261	__hci_req_enable_advertising(req);
1262
1263	return 0;
1264}
1265
1266static void cancel_adv_timeout(struct hci_dev *hdev)
1267{
1268	if (hdev->adv_instance_timeout) {
1269		hdev->adv_instance_timeout = 0;
1270		cancel_delayed_work(&hdev->adv_instance_expire);
1271	}
1272}
1273
1274/* For a single instance:
1275 * - force == true: The instance will be removed even when its remaining
1276 *   lifetime is not zero.
1277 * - force == false: the instance will be deactivated but kept stored unless
1278 *   the remaining lifetime is zero.
1279 *
1280 * For instance == 0x00:
1281 * - force == true: All instances will be removed regardless of their timeout
1282 *   setting.
1283 * - force == false: Only instances that have a timeout will be removed.
1284 */
1285void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
1286				u8 instance, bool force)
1287{
1288	struct adv_info *adv_instance, *n, *next_instance = NULL;
1289	int err;
1290	u8 rem_inst;
1291
1292	/* Cancel any timeout concerning the removed instance(s). */
1293	if (!instance || hdev->cur_adv_instance == instance)
1294		cancel_adv_timeout(hdev);
1295
1296	/* Get the next instance to advertise BEFORE we remove
1297	 * the current one. This can be the same instance again
1298	 * if there is only one instance.
1299	 */
1300	if (instance && hdev->cur_adv_instance == instance)
1301		next_instance = hci_get_next_instance(hdev, instance);
1302
1303	if (instance == 0x00) {
1304		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1305					 list) {
1306			if (!(force || adv_instance->timeout))
1307				continue;
1308
1309			rem_inst = adv_instance->instance;
1310			err = hci_remove_adv_instance(hdev, rem_inst);
1311			if (!err)
1312				mgmt_advertising_removed(NULL, hdev, rem_inst);
1313		}
1314	} else {
1315		adv_instance = hci_find_adv_instance(hdev, instance);
1316
1317		if (force || (adv_instance && adv_instance->timeout &&
1318			      !adv_instance->remaining_time)) {
1319			/* Don't advertise a removed instance. */
1320			if (next_instance &&
1321			    next_instance->instance == instance)
1322				next_instance = NULL;
1323
1324			err = hci_remove_adv_instance(hdev, instance);
1325			if (!err)
1326				mgmt_advertising_removed(NULL, hdev, instance);
1327		}
1328	}
1329
1330	if (!req || !hdev_is_powered(hdev) ||
1331	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
1332		return;
1333
1334	if (next_instance)
1335		__hci_req_schedule_adv_instance(req, next_instance->instance,
1336						false);
1337}
1338
1339static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1340{
1341	struct hci_dev *hdev = req->hdev;
1342
1343	/* If we're advertising or initiating an LE connection we can't
1344	 * go ahead and change the random address at this time. This is
1345	 * because the eventual initiator address used for the
1346	 * subsequently created connection will be undefined (some
1347	 * controllers use the new address and others the one we had
1348	 * when the operation started).
1349	 *
1350	 * In this kind of scenario skip the update and let the random
1351	 * address be updated at the next cycle.
1352	 */
1353	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1354	    hci_lookup_le_connect(hdev)) {
1355		BT_DBG("Deferring random address update");
1356		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1357		return;
1358	}
1359
1360	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1361}
1362
1363int hci_update_random_address(struct hci_request *req, bool require_privacy,
1364			      bool use_rpa, u8 *own_addr_type)
1365{
1366	struct hci_dev *hdev = req->hdev;
1367	int err;
1368
 1369	/* If privacy is enabled use a resolvable private address. If
 1370	 * the current RPA has expired or something other than the
 1371	 * current RPA is in use, then generate a new one.
1372	 */
1373	if (use_rpa) {
1374		int to;
1375
1376		*own_addr_type = ADDR_LE_DEV_RANDOM;
1377
1378		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1379		    !bacmp(&hdev->random_addr, &hdev->rpa))
1380			return 0;
1381
1382		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1383		if (err < 0) {
1384			BT_ERR("%s failed to generate new RPA", hdev->name);
1385			return err;
1386		}
1387
1388		set_random_addr(req, &hdev->rpa);
1389
1390		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1391		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1392
1393		return 0;
1394	}
1395
1396	/* In case of required privacy without resolvable private address,
 1397	 * use a non-resolvable private address. This is useful for active
1398	 * scanning and non-connectable advertising.
1399	 */
1400	if (require_privacy) {
1401		bdaddr_t nrpa;
1402
1403		while (true) {
1404			/* The non-resolvable private address is generated
1405			 * from random six bytes with the two most significant
1406			 * bits cleared.
1407			 */
1408			get_random_bytes(&nrpa, 6);
1409			nrpa.b[5] &= 0x3f;
1410
1411			/* The non-resolvable private address shall not be
1412			 * equal to the public address.
1413			 */
1414			if (bacmp(&hdev->bdaddr, &nrpa))
1415				break;
1416		}
1417
1418		*own_addr_type = ADDR_LE_DEV_RANDOM;
1419		set_random_addr(req, &nrpa);
1420		return 0;
1421	}
1422
1423	/* If forcing static address is in use or there is no public
1424	 * address use the static address as random address (but skip
1425	 * the HCI command if the current random address is already the
 1426	 * static one).
1427	 *
1428	 * In case BR/EDR has been disabled on a dual-mode controller
1429	 * and a static address has been configured, then use that
1430	 * address instead of the public BR/EDR address.
1431	 */
1432	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1433	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1434	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1435	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1436		*own_addr_type = ADDR_LE_DEV_RANDOM;
1437		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1438			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1439				    &hdev->static_addr);
1440		return 0;
1441	}
1442
1443	/* Neither privacy nor static address is being used so use a
1444	 * public address.
1445	 */
1446	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1447
1448	return 0;
1449}
1450
1451static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1452{
1453	struct bdaddr_list *b;
1454
1455	list_for_each_entry(b, &hdev->whitelist, list) {
1456		struct hci_conn *conn;
1457
1458		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1459		if (!conn)
1460			return true;
1461
1462		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1463			return true;
1464	}
1465
1466	return false;
1467}
1468
1469void __hci_req_update_scan(struct hci_request *req)
1470{
1471	struct hci_dev *hdev = req->hdev;
1472	u8 scan;
1473
1474	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1475		return;
1476
1477	if (!hdev_is_powered(hdev))
1478		return;
1479
1480	if (mgmt_powering_down(hdev))
1481		return;
1482
1483	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1484	    disconnected_whitelist_entries(hdev))
1485		scan = SCAN_PAGE;
1486	else
1487		scan = SCAN_DISABLED;
1488
1489	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1490		scan |= SCAN_INQUIRY;
1491
1492	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1493	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1494		return;
1495
1496	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1497}
1498
1499static int update_scan(struct hci_request *req, unsigned long opt)
1500{
1501	hci_dev_lock(req->hdev);
1502	__hci_req_update_scan(req);
1503	hci_dev_unlock(req->hdev);
1504	return 0;
1505}
1506
1507static void scan_update_work(struct work_struct *work)
1508{
1509	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1510
1511	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1512}
1513
1514static int connectable_update(struct hci_request *req, unsigned long opt)
1515{
1516	struct hci_dev *hdev = req->hdev;
1517
1518	hci_dev_lock(hdev);
1519
1520	__hci_req_update_scan(req);
1521
1522	/* If BR/EDR is not enabled and we disable advertising as a
1523	 * by-product of disabling connectable, we need to update the
1524	 * advertising flags.
1525	 */
1526	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1527		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
1528
1529	/* Update the advertising parameters if necessary */
1530	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1531	    !list_empty(&hdev->adv_instances))
1532		__hci_req_enable_advertising(req);
1533
1534	__hci_update_background_scan(req);
1535
1536	hci_dev_unlock(hdev);
1537
1538	return 0;
1539}
1540
1541static void connectable_update_work(struct work_struct *work)
1542{
1543	struct hci_dev *hdev = container_of(work, struct hci_dev,
1544					    connectable_update);
1545	u8 status;
1546
1547	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1548	mgmt_set_connectable_complete(hdev, status);
1549}
1550
1551static u8 get_service_classes(struct hci_dev *hdev)
1552{
1553	struct bt_uuid *uuid;
1554	u8 val = 0;
1555
1556	list_for_each_entry(uuid, &hdev->uuids, list)
1557		val |= uuid->svc_hint;
1558
1559	return val;
1560}
1561
1562void __hci_req_update_class(struct hci_request *req)
1563{
1564	struct hci_dev *hdev = req->hdev;
1565	u8 cod[3];
1566
1567	BT_DBG("%s", hdev->name);
1568
1569	if (!hdev_is_powered(hdev))
1570		return;
1571
1572	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1573		return;
1574
1575	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1576		return;
1577
1578	cod[0] = hdev->minor_class;
1579	cod[1] = hdev->major_class;
1580	cod[2] = get_service_classes(hdev);
1581
1582	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1583		cod[1] |= 0x20;
1584
1585	if (memcmp(cod, hdev->dev_class, 3) == 0)
1586		return;
1587
1588	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1589}
1590
1591static void write_iac(struct hci_request *req)
1592{
1593	struct hci_dev *hdev = req->hdev;
1594	struct hci_cp_write_current_iac_lap cp;
1595
1596	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1597		return;
1598
1599	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1600		/* Limited discoverable mode */
1601		cp.num_iac = min_t(u8, hdev->num_iac, 2);
1602		cp.iac_lap[0] = 0x00;	/* LIAC */
1603		cp.iac_lap[1] = 0x8b;
1604		cp.iac_lap[2] = 0x9e;
1605		cp.iac_lap[3] = 0x33;	/* GIAC */
1606		cp.iac_lap[4] = 0x8b;
1607		cp.iac_lap[5] = 0x9e;
1608	} else {
1609		/* General discoverable mode */
1610		cp.num_iac = 1;
1611		cp.iac_lap[0] = 0x33;	/* GIAC */
1612		cp.iac_lap[1] = 0x8b;
1613		cp.iac_lap[2] = 0x9e;
1614	}
1615
1616	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1617		    (cp.num_iac * 3) + 1, &cp);
1618}
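/* For reference: the IAC LAPs above are written little-endian, i.e.
 * 0x33 0x8b 0x9e is the GIAC (0x9E8B33) and 0x00 0x8b 0x9e is the
 * LIAC (0x9E8B00).
 */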
1619
1620static int discoverable_update(struct hci_request *req, unsigned long opt)
1621{
1622	struct hci_dev *hdev = req->hdev;
1623
1624	hci_dev_lock(hdev);
1625
1626	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1627		write_iac(req);
1628		__hci_req_update_scan(req);
1629		__hci_req_update_class(req);
1630	}
1631
1632	/* Advertising instances don't use the global discoverable setting, so
1633	 * only update AD if advertising was enabled using Set Advertising.
1634	 */
1635	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1636		__hci_req_update_adv_data(req, 0x00);
1637
1638		/* Discoverable mode affects the local advertising
1639		 * address in limited privacy mode.
1640		 */
1641		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1642			__hci_req_enable_advertising(req);
1643	}
1644
1645	hci_dev_unlock(hdev);
1646
1647	return 0;
1648}
1649
1650static void discoverable_update_work(struct work_struct *work)
1651{
1652	struct hci_dev *hdev = container_of(work, struct hci_dev,
1653					    discoverable_update);
1654	u8 status;
1655
1656	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1657	mgmt_set_discoverable_complete(hdev, status);
1658}
1659
1660void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1661		      u8 reason)
1662{
1663	switch (conn->state) {
1664	case BT_CONNECTED:
1665	case BT_CONFIG:
1666		if (conn->type == AMP_LINK) {
1667			struct hci_cp_disconn_phy_link cp;
1668
1669			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1670			cp.reason = reason;
1671			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1672				    &cp);
1673		} else {
1674			struct hci_cp_disconnect dc;
1675
1676			dc.handle = cpu_to_le16(conn->handle);
1677			dc.reason = reason;
1678			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1679		}
1680
1681		conn->state = BT_DISCONN;
1682
1683		break;
1684	case BT_CONNECT:
1685		if (conn->type == LE_LINK) {
1686			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1687				break;
1688			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1689				    0, NULL);
1690		} else if (conn->type == ACL_LINK) {
1691			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1692				break;
1693			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1694				    6, &conn->dst);
1695		}
1696		break;
1697	case BT_CONNECT2:
1698		if (conn->type == ACL_LINK) {
1699			struct hci_cp_reject_conn_req rej;
1700
1701			bacpy(&rej.bdaddr, &conn->dst);
1702			rej.reason = reason;
1703
1704			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1705				    sizeof(rej), &rej);
1706		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1707			struct hci_cp_reject_sync_conn_req rej;
1708
1709			bacpy(&rej.bdaddr, &conn->dst);
1710
1711			/* SCO rejection has its own limited set of
1712			 * allowed error values (0x0D-0x0F) which isn't
1713			 * compatible with most values passed to this
1714			 * function. To be safe hard-code one of the
1715			 * values that's suitable for SCO.
1716			 */
1717			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1718
1719			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1720				    sizeof(rej), &rej);
1721		}
1722		break;
1723	default:
1724		conn->state = BT_CLOSED;
1725		break;
1726	}
1727}
1728
1729static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1730{
1731	if (status)
1732		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1733}
1734
1735int hci_abort_conn(struct hci_conn *conn, u8 reason)
1736{
1737	struct hci_request req;
1738	int err;
1739
1740	hci_req_init(&req, conn->hdev);
1741
1742	__hci_abort_conn(&req, conn, reason);
1743
1744	err = hci_req_run(&req, abort_conn_complete);
1745	if (err && err != -ENODATA) {
1746		BT_ERR("Failed to run HCI request: err %d", err);
1747		return err;
1748	}
1749
1750	return 0;
1751}
1752
1753static int update_bg_scan(struct hci_request *req, unsigned long opt)
1754{
1755	hci_dev_lock(req->hdev);
1756	__hci_update_background_scan(req);
1757	hci_dev_unlock(req->hdev);
1758	return 0;
1759}
1760
1761static void bg_scan_update(struct work_struct *work)
1762{
1763	struct hci_dev *hdev = container_of(work, struct hci_dev,
1764					    bg_scan_update);
1765	struct hci_conn *conn;
1766	u8 status;
1767	int err;
1768
1769	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1770	if (!err)
1771		return;
1772
1773	hci_dev_lock(hdev);
1774
1775	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1776	if (conn)
1777		hci_le_conn_failed(conn, status);
1778
1779	hci_dev_unlock(hdev);
1780}
1781
1782static int le_scan_disable(struct hci_request *req, unsigned long opt)
1783{
1784	hci_req_add_le_scan_disable(req);
1785	return 0;
1786}
1787
1788static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1789{
1790	u8 length = opt;
1791	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1792	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1793	struct hci_cp_inquiry cp;
1794
1795	BT_DBG("%s", req->hdev->name);
1796
1797	hci_dev_lock(req->hdev);
1798	hci_inquiry_cache_flush(req->hdev);
1799	hci_dev_unlock(req->hdev);
1800
1801	memset(&cp, 0, sizeof(cp));
1802
1803	if (req->hdev->discovery.limited)
1804		memcpy(&cp.lap, liac, sizeof(cp.lap));
1805	else
1806		memcpy(&cp.lap, giac, sizeof(cp.lap));
1807
1808	cp.length = length;
1809
1810	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1811
1812	return 0;
1813}
1814
1815static void le_scan_disable_work(struct work_struct *work)
1816{
1817	struct hci_dev *hdev = container_of(work, struct hci_dev,
1818					    le_scan_disable.work);
1819	u8 status;
1820
1821	BT_DBG("%s", hdev->name);
1822
1823	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1824		return;
1825
1826	cancel_delayed_work(&hdev->le_scan_restart);
1827
1828	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1829	if (status) {
1830		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1831		return;
1832	}
1833
1834	hdev->discovery.scan_start = 0;
1835
 1836	/* If we were running an LE-only scan, change the discovery state.
 1837	 * If we were running both LE and BR/EDR inquiry simultaneously
 1838	 * and BR/EDR inquiry has already finished, stop discovery;
 1839	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
 1840	 * If we are still resolving a remote device name, do not change
 1841	 * the discovery state.
1842	 */
1843
1844	if (hdev->discovery.type == DISCOV_TYPE_LE)
1845		goto discov_stopped;
1846
1847	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1848		return;
1849
1850	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1851		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1852		    hdev->discovery.state != DISCOVERY_RESOLVING)
1853			goto discov_stopped;
1854
1855		return;
1856	}
1857
1858	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1859		     HCI_CMD_TIMEOUT, &status);
1860	if (status) {
1861		BT_ERR("Inquiry failed: status 0x%02x", status);
1862		goto discov_stopped;
1863	}
1864
1865	return;
1866
1867discov_stopped:
1868	hci_dev_lock(hdev);
1869	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1870	hci_dev_unlock(hdev);
1871}
1872
1873static int le_scan_restart(struct hci_request *req, unsigned long opt)
1874{
1875	struct hci_dev *hdev = req->hdev;
1876	struct hci_cp_le_set_scan_enable cp;
1877
1878	/* If controller is not scanning we are done. */
1879	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1880		return 0;
1881
1882	hci_req_add_le_scan_disable(req);
1883
1884	memset(&cp, 0, sizeof(cp));
1885	cp.enable = LE_SCAN_ENABLE;
1886	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1887	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1888
1889	return 0;
1890}
1891
1892static void le_scan_restart_work(struct work_struct *work)
1893{
1894	struct hci_dev *hdev = container_of(work, struct hci_dev,
1895					    le_scan_restart.work);
1896	unsigned long timeout, duration, scan_start, now;
1897	u8 status;
1898
1899	BT_DBG("%s", hdev->name);
1900
1901	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1902	if (status) {
1903		BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1904		return;
1905	}
1906
1907	hci_dev_lock(hdev);
1908
1909	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1910	    !hdev->discovery.scan_start)
1911		goto unlock;
1912
1913	/* When the scan was started, hdev->le_scan_disable has been queued
1914	 * after duration from scan_start. During scan restart this job
1915	 * has been canceled, and we need to queue it again after proper
1916	 * timeout, to make sure that scan does not run indefinitely.
1917	 */
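	/* For example (illustrative numbers): with a 10 second scan_duration,
	 * if 4 seconds have elapsed since scan_start, the disable work is
	 * re-queued with a 6 second timeout; once the duration has already
	 * passed, it is queued immediately (timeout = 0). The else branch of
	 * the elapsed calculation below only handles jiffies wrap-around
	 * between scan_start and now.
	 */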
1918	duration = hdev->discovery.scan_duration;
1919	scan_start = hdev->discovery.scan_start;
1920	now = jiffies;
1921	if (now - scan_start <= duration) {
1922		int elapsed;
1923
1924		if (now >= scan_start)
1925			elapsed = now - scan_start;
1926		else
1927			elapsed = ULONG_MAX - scan_start + now;
1928
1929		timeout = duration - elapsed;
1930	} else {
1931		timeout = 0;
1932	}
1933
1934	queue_delayed_work(hdev->req_workqueue,
1935			   &hdev->le_scan_disable, timeout);
1936
1937unlock:
1938	hci_dev_unlock(hdev);
1939}
1940
1941static void disable_advertising(struct hci_request *req)
1942{
1943	u8 enable = 0x00;
1944
1945	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1946}
1947
1948static int active_scan(struct hci_request *req, unsigned long opt)
1949{
1950	uint16_t interval = opt;
1951	struct hci_dev *hdev = req->hdev;
1952	struct hci_cp_le_set_scan_param param_cp;
1953	struct hci_cp_le_set_scan_enable enable_cp;
1954	u8 own_addr_type;
1955	int err;
1956
1957	BT_DBG("%s", hdev->name);
1958
1959	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1960		hci_dev_lock(hdev);
1961
1962		/* Don't let discovery abort an outgoing connection attempt
1963		 * that's using directed advertising.
1964		 */
1965		if (hci_lookup_le_connect(hdev)) {
1966			hci_dev_unlock(hdev);
1967			return -EBUSY;
1968		}
1969
1970		cancel_adv_timeout(hdev);
1971		hci_dev_unlock(hdev);
1972
1973		disable_advertising(req);
1974	}
1975
1976	/* If controller is scanning, it means the background scanning is
1977	 * running. Thus, we should temporarily stop it in order to set the
1978	 * discovery scanning parameters.
1979	 */
1980	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1981		hci_req_add_le_scan_disable(req);
1982
1983	/* All active scans will be done with either a resolvable private
1984	 * address (when privacy feature has been enabled) or non-resolvable
1985	 * private address.
1986	 */
1987	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
1988					&own_addr_type);
1989	if (err < 0)
1990		own_addr_type = ADDR_LE_DEV_PUBLIC;
1991
1992	memset(&param_cp, 0, sizeof(param_cp));
1993	param_cp.type = LE_SCAN_ACTIVE;
1994	param_cp.interval = cpu_to_le16(interval);
1995	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1996	param_cp.own_address_type = own_addr_type;
1997
1998	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1999		    &param_cp);
2000
2001	memset(&enable_cp, 0, sizeof(enable_cp));
2002	enable_cp.enable = LE_SCAN_ENABLE;
2003	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2004
2005	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2006		    &enable_cp);
2007
2008	return 0;
2009}
2010
2011static int interleaved_discov(struct hci_request *req, unsigned long opt)
2012{
2013	int err;
2014
2015	BT_DBG("%s", req->hdev->name);
2016
2017	err = active_scan(req, opt);
2018	if (err)
2019		return err;
2020
2021	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2022}
2023
2024static void start_discovery(struct hci_dev *hdev, u8 *status)
2025{
2026	unsigned long timeout;
2027
2028	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2029
2030	switch (hdev->discovery.type) {
2031	case DISCOV_TYPE_BREDR:
2032		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2033			hci_req_sync(hdev, bredr_inquiry,
2034				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2035				     status);
2036		return;
2037	case DISCOV_TYPE_INTERLEAVED:
2038		/* When running simultaneous discovery, the LE scanning time
2039		 * should occupy the whole discovery time since BR/EDR inquiry
2040		 * and LE scanning are scheduled by the controller.
2041		 *
2042		 * For interleaving discovery in comparison, BR/EDR inquiry
2043		 * and LE scanning are done sequentially with separate
2044		 * timeouts.
2045		 */
2046		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2047			     &hdev->quirks)) {
2048			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2049			/* During simultaneous discovery, we double LE scan
2050			 * interval. We must leave some time for the controller
2051			 * to do BR/EDR inquiry.
2052			 */
2053			hci_req_sync(hdev, interleaved_discov,
2054				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2055				     status);
2056			break;
2057		}
2058
2059		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2060		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2061			     HCI_CMD_TIMEOUT, status);
2062		break;
2063	case DISCOV_TYPE_LE:
2064		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2065		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2066			     HCI_CMD_TIMEOUT, status);
2067		break;
2068	default:
2069		*status = HCI_ERROR_UNSPECIFIED;
2070		return;
2071	}
2072
2073	if (*status)
2074		return;
2075
2076	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2077
2078	/* When service discovery is used and the controller has a
2079	 * strict duplicate filter, it is important to remember the
2080	 * start and duration of the scan. This is required for
2081	 * restarting scanning during the discovery phase.
2082	 */
2083	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2084		     hdev->discovery.result_filtering) {
2085		hdev->discovery.scan_start = jiffies;
2086		hdev->discovery.scan_duration = timeout;
2087	}
2088
2089	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2090			   timeout);
2091}
2092
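/* Queue the commands needed to stop an ongoing discovery: inquiry cancel,
 * LE scan disable and/or remote name request cancel, depending on the
 * current discovery state. The return value tells the caller whether any
 * stopping action was initiated.
 */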
2093bool hci_req_stop_discovery(struct hci_request *req)
2094{
2095	struct hci_dev *hdev = req->hdev;
2096	struct discovery_state *d = &hdev->discovery;
2097	struct hci_cp_remote_name_req_cancel cp;
2098	struct inquiry_entry *e;
2099	bool ret = false;
2100
2101	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2102
2103	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2104		if (test_bit(HCI_INQUIRY, &hdev->flags))
2105			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2106
2107		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2108			cancel_delayed_work(&hdev->le_scan_disable);
2109			hci_req_add_le_scan_disable(req);
2110		}
2111
2112		ret = true;
2113	} else {
2114		/* Passive scanning */
2115		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2116			hci_req_add_le_scan_disable(req);
2117			ret = true;
2118		}
2119	}
2120
2121	/* No further actions needed for LE-only discovery */
2122	if (d->type == DISCOV_TYPE_LE)
2123		return ret;
2124
2125	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2126		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2127						     NAME_PENDING);
2128		if (!e)
2129			return ret;
2130
2131		bacpy(&cp.bdaddr, &e->data.bdaddr);
2132		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2133			    &cp);
2134		ret = true;
2135	}
2136
2137	return ret;
2138}
2139
2140static int stop_discovery(struct hci_request *req, unsigned long opt)
2141{
2142	hci_dev_lock(req->hdev);
2143	hci_req_stop_discovery(req);
2144	hci_dev_unlock(req->hdev);
2145
2146	return 0;
2147}
2148
2149static void discov_update(struct work_struct *work)
2150{
2151	struct hci_dev *hdev = container_of(work, struct hci_dev,
2152					    discov_update);
2153	u8 status = 0;
2154
2155	switch (hdev->discovery.state) {
2156	case DISCOVERY_STARTING:
2157		start_discovery(hdev, &status);
2158		mgmt_start_discovery_complete(hdev, status);
2159		if (status)
2160			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2161		else
2162			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2163		break;
2164	case DISCOVERY_STOPPING:
2165		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2166		mgmt_stop_discovery_complete(hdev, status);
2167		if (!status)
2168			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2169		break;
2170	case DISCOVERY_STOPPED:
2171	default:
2172		return;
2173	}
2174}
2175
2176static void discov_off(struct work_struct *work)
2177{
2178	struct hci_dev *hdev = container_of(work, struct hci_dev,
2179					    discov_off.work);
2180
2181	BT_DBG("%s", hdev->name);
2182
2183	hci_dev_lock(hdev);
2184
2185	/* When the discoverable timeout triggers, just make sure that
2186	 * the limited discoverable flag is cleared. Even in the case
2187	 * of a timeout triggered from general discoverable mode, it is
2188	 * safe to unconditionally clear the flag.
2189	 */
2190	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2191	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2192	hdev->discov_timeout = 0;
2193
2194	hci_dev_unlock(hdev);
2195
2196	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2197	mgmt_new_settings(hdev);
2198}
2199
2200static int powered_update_hci(struct hci_request *req, unsigned long opt)
2201{
2202	struct hci_dev *hdev = req->hdev;
2203	u8 link_sec;
2204
2205	hci_dev_lock(hdev);
2206
2207	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2208	    !lmp_host_ssp_capable(hdev)) {
2209		u8 mode = 0x01;
2210
2211		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2212
2213		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2214			u8 support = 0x01;
2215
2216			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2217				    sizeof(support), &support);
2218		}
2219	}
2220
2221	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2222	    lmp_bredr_capable(hdev)) {
2223		struct hci_cp_write_le_host_supported cp;
2224
2225		cp.le = 0x01;
2226		cp.simul = 0x00;
2227
2228		/* Check first if we already have the right
2229		 * host state (host features set)
2230		 */
2231		if (cp.le != lmp_host_le_capable(hdev) ||
2232		    cp.simul != lmp_host_le_br_capable(hdev))
2233			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2234				    sizeof(cp), &cp);
2235	}
2236
2237	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2238		/* Make sure the controller has a good default for
2239		 * advertising data. This also applies to the case
2240		 * where BR/EDR was toggled during the AUTO_OFF phase.
2241		 */
2242		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2243		    list_empty(&hdev->adv_instances)) {
2244			__hci_req_update_adv_data(req, 0x00);
2245			__hci_req_update_scan_rsp_data(req, 0x00);
2246
2247			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2248				__hci_req_enable_advertising(req);
2249		} else if (!list_empty(&hdev->adv_instances)) {
2250			struct adv_info *adv_instance;
2251
2252			adv_instance = list_first_entry(&hdev->adv_instances,
2253							struct adv_info, list);
2254			__hci_req_schedule_adv_instance(req,
2255							adv_instance->instance,
2256							true);
2257		}
2258	}
2259
2260	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2261	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2262		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2263			    sizeof(link_sec), &link_sec);
2264
2265	if (lmp_bredr_capable(hdev)) {
2266		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2267			__hci_req_write_fast_connectable(req, true);
2268		else
2269			__hci_req_write_fast_connectable(req, false);
2270		__hci_req_update_scan(req);
2271		__hci_req_update_class(req);
2272		__hci_req_update_name(req);
2273		__hci_req_update_eir(req);
2274	}
2275
2276	hci_dev_unlock(hdev);
2277	return 0;
2278}
2279
2280int __hci_req_hci_power_on(struct hci_dev *hdev)
2281{
2282	/* Register the available SMP channels (BR/EDR and LE) only when
2283	 * successfully powering on the controller. This late
2284	 * registration is required so that LE SMP can clearly decide if
2285	 * the public address or static address is used.
2286	 */
2287	smp_register(hdev);
2288
2289	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2290			      NULL);
2291}
2292
2293void hci_request_setup(struct hci_dev *hdev)
2294{
2295	INIT_WORK(&hdev->discov_update, discov_update);
2296	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2297	INIT_WORK(&hdev->scan_update, scan_update_work);
2298	INIT_WORK(&hdev->connectable_update, connectable_update_work);
2299	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2300	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2301	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2302	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2303	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2304}
2305
2306void hci_request_cancel_all(struct hci_dev *hdev)
2307{
2308	hci_req_sync_cancel(hdev, ENODEV);
2309
2310	cancel_work_sync(&hdev->discov_update);
2311	cancel_work_sync(&hdev->bg_scan_update);
2312	cancel_work_sync(&hdev->scan_update);
2313	cancel_work_sync(&hdev->connectable_update);
2314	cancel_work_sync(&hdev->discoverable_update);
2315	cancel_delayed_work_sync(&hdev->discov_off);
2316	cancel_delayed_work_sync(&hdev->le_scan_disable);
2317	cancel_delayed_work_sync(&hdev->le_scan_restart);
2318
2319	if (hdev->adv_instance_timeout) {
2320		cancel_delayed_work_sync(&hdev->adv_instance_expire);
2321		hdev->adv_instance_timeout = 0;
2322	}
2323}
v5.14.15
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2014 Intel Corporation
   5
   6   This program is free software; you can redistribute it and/or modify
   7   it under the terms of the GNU General Public License version 2 as
   8   published by the Free Software Foundation;
   9
  10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  11   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  12   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  13   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  14   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  15   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  16   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  17   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  18
  19   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  20   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  21   SOFTWARE IS DISCLAIMED.
  22*/
  23
  24#include <linux/sched/signal.h>
  25
  26#include <net/bluetooth/bluetooth.h>
  27#include <net/bluetooth/hci_core.h>
  28#include <net/bluetooth/mgmt.h>
  29
  30#include "smp.h"
  31#include "hci_request.h"
  32#include "msft.h"
  33
  34#define HCI_REQ_DONE	  0
  35#define HCI_REQ_PEND	  1
  36#define HCI_REQ_CANCELED  2
  37
  38void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
  39{
  40	skb_queue_head_init(&req->cmd_q);
  41	req->hdev = hdev;
  42	req->err = 0;
  43}
  44
  45void hci_req_purge(struct hci_request *req)
  46{
  47	skb_queue_purge(&req->cmd_q);
  48}
  49
  50bool hci_req_status_pend(struct hci_dev *hdev)
  51{
  52	return hdev->req_status == HCI_REQ_PEND;
  53}
  54
  55static int req_run(struct hci_request *req, hci_req_complete_t complete,
  56		   hci_req_complete_skb_t complete_skb)
  57{
  58	struct hci_dev *hdev = req->hdev;
  59	struct sk_buff *skb;
  60	unsigned long flags;
  61
  62	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
  63
  64	/* If an error occurred during request building, remove all HCI
  65	 * commands queued on the HCI request queue.
  66	 */
  67	if (req->err) {
  68		skb_queue_purge(&req->cmd_q);
  69		return req->err;
  70	}
  71
  72	/* Do not allow empty requests */
  73	if (skb_queue_empty(&req->cmd_q))
  74		return -ENODATA;
  75
  76	skb = skb_peek_tail(&req->cmd_q);
  77	if (complete) {
  78		bt_cb(skb)->hci.req_complete = complete;
  79	} else if (complete_skb) {
  80		bt_cb(skb)->hci.req_complete_skb = complete_skb;
  81		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
  82	}
  83
  84	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
  85	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
  86	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
  87
  88	queue_work(hdev->workqueue, &hdev->cmd_work);
  89
  90	return 0;
  91}
  92
  93int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
  94{
  95	return req_run(req, complete, NULL);
  96}
  97
  98int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
  99{
 100	return req_run(req, NULL, complete);
 101}
 102
 103static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 104				  struct sk_buff *skb)
 105{
 106	bt_dev_dbg(hdev, "result 0x%2.2x", result);
 107
 108	if (hdev->req_status == HCI_REQ_PEND) {
 109		hdev->req_result = result;
 110		hdev->req_status = HCI_REQ_DONE;
 111		if (skb)
 112			hdev->req_skb = skb_get(skb);
 113		wake_up_interruptible(&hdev->req_wait_q);
 114	}
 115}
 116
 117void hci_req_sync_cancel(struct hci_dev *hdev, int err)
 118{
 119	bt_dev_dbg(hdev, "err 0x%2.2x", err);
 120
 121	if (hdev->req_status == HCI_REQ_PEND) {
 122		hdev->req_result = err;
 123		hdev->req_status = HCI_REQ_CANCELED;
 124		wake_up_interruptible(&hdev->req_wait_q);
 125	}
 126}
 127
 128struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
 129				  const void *param, u8 event, u32 timeout)
 130{
 131	struct hci_request req;
 132	struct sk_buff *skb;
 133	int err = 0;
 134
 135	bt_dev_dbg(hdev, "");
 136
 137	hci_req_init(&req, hdev);
 138
 139	hci_req_add_ev(&req, opcode, plen, param, event);
 140
 141	hdev->req_status = HCI_REQ_PEND;
 142
 143	err = hci_req_run_skb(&req, hci_req_sync_complete);
 144	if (err < 0)
 145		return ERR_PTR(err);
 146
 147	err = wait_event_interruptible_timeout(hdev->req_wait_q,
 148			hdev->req_status != HCI_REQ_PEND, timeout);
 149
 150	if (err == -ERESTARTSYS)
 151		return ERR_PTR(-EINTR);
 152
 153	switch (hdev->req_status) {
 154	case HCI_REQ_DONE:
 155		err = -bt_to_errno(hdev->req_result);
 156		break;
 157
 158	case HCI_REQ_CANCELED:
 159		err = -hdev->req_result;
 160		break;
 161
 162	default:
 163		err = -ETIMEDOUT;
 164		break;
 165	}
 166
 167	hdev->req_status = hdev->req_result = 0;
 168	skb = hdev->req_skb;
 169	hdev->req_skb = NULL;
 170
 171	bt_dev_dbg(hdev, "end: err %d", err);
 172
 173	if (err < 0) {
 174		kfree_skb(skb);
 175		return ERR_PTR(err);
 176	}
 177
 178	if (!skb)
 179		return ERR_PTR(-ENODATA);
 180
 181	return skb;
 182}
 183EXPORT_SYMBOL(__hci_cmd_sync_ev);
 184
 185struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
 186			       const void *param, u32 timeout)
 187{
 188	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
 189}
 190EXPORT_SYMBOL(__hci_cmd_sync);
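/* Illustrative usage (hypothetical caller): send a command, wait for its
 * completion event and consume the returned parameters:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */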
 191
 192/* Execute request and wait for completion. */
 193int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
 194						     unsigned long opt),
 195		   unsigned long opt, u32 timeout, u8 *hci_status)
 196{
 197	struct hci_request req;
 198	int err = 0;
 199
 200	bt_dev_dbg(hdev, "start");
 201
 202	hci_req_init(&req, hdev);
 203
 204	hdev->req_status = HCI_REQ_PEND;
 205
 206	err = func(&req, opt);
 207	if (err) {
 208		if (hci_status)
 209			*hci_status = HCI_ERROR_UNSPECIFIED;
 210		return err;
 211	}
 212
 213	err = hci_req_run_skb(&req, hci_req_sync_complete);
 214	if (err < 0) {
 215		hdev->req_status = 0;
 216
 217		/* ENODATA means the HCI request command queue is empty.
 218		 * This can happen when a request with conditionals doesn't
 219		 * trigger any commands to be sent. This is normal behavior
 220		 * and should not trigger an error return.
 221		 */
 222		if (err == -ENODATA) {
 223			if (hci_status)
 224				*hci_status = 0;
 225			return 0;
 226		}
 227
 228		if (hci_status)
 229			*hci_status = HCI_ERROR_UNSPECIFIED;
 230
 231		return err;
 232	}
 233
 234	err = wait_event_interruptible_timeout(hdev->req_wait_q,
 235			hdev->req_status != HCI_REQ_PEND, timeout);
 236
 237	if (err == -ERESTARTSYS)
 238		return -EINTR;
 239
 240	switch (hdev->req_status) {
 241	case HCI_REQ_DONE:
 242		err = -bt_to_errno(hdev->req_result);
 243		if (hci_status)
 244			*hci_status = hdev->req_result;
 245		break;
 246
 247	case HCI_REQ_CANCELED:
 248		err = -hdev->req_result;
 249		if (hci_status)
 250			*hci_status = HCI_ERROR_UNSPECIFIED;
 251		break;
 252
 253	default:
 254		err = -ETIMEDOUT;
 255		if (hci_status)
 256			*hci_status = HCI_ERROR_UNSPECIFIED;
 257		break;
 258	}
 259
 260	kfree_skb(hdev->req_skb);
 261	hdev->req_skb = NULL;
 262	hdev->req_status = hdev->req_result = 0;
 263
 264	bt_dev_dbg(hdev, "end: err %d", err);
 265
 266	return err;
 267}
 268
 269int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
 270						  unsigned long opt),
 271		 unsigned long opt, u32 timeout, u8 *hci_status)
 272{
 273	int ret;
 274
 275	/* Serialize all requests */
 276	hci_req_sync_lock(hdev);
  277	/* Check the state after obtaining the lock to protect the HCI_UP
 278	 * against any races from hci_dev_do_close when the controller
 279	 * gets removed.
 280	 */
 281	if (test_bit(HCI_UP, &hdev->flags))
 282		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
 283	else
 284		ret = -ENETDOWN;
 285	hci_req_sync_unlock(hdev);
 286
 287	return ret;
 288}
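/* Illustrative usage (hypothetical helper, modelled on the callers in this
 * file): a request-builder callback queues the HCI commands and
 * hci_req_sync() runs them while waiting for the result:
 *
 *	static int my_scan_update(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	hci_req_sync(hdev, my_scan_update, 0, HCI_CMD_TIMEOUT, &status);
 */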
 289
 290struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
 291				const void *param)
 292{
 293	int len = HCI_COMMAND_HDR_SIZE + plen;
 294	struct hci_command_hdr *hdr;
 295	struct sk_buff *skb;
 296
 297	skb = bt_skb_alloc(len, GFP_ATOMIC);
 298	if (!skb)
 299		return NULL;
 300
 301	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
 302	hdr->opcode = cpu_to_le16(opcode);
 303	hdr->plen   = plen;
 304
 305	if (plen)
 306		skb_put_data(skb, param, plen);
 307
 308	bt_dev_dbg(hdev, "skb len %d", skb->len);
 309
 310	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
 311	hci_skb_opcode(skb) = opcode;
 312
 313	return skb;
 314}
 315
 316/* Queue a command to an asynchronous HCI request */
 317void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
 318		    const void *param, u8 event)
 319{
 320	struct hci_dev *hdev = req->hdev;
 321	struct sk_buff *skb;
 322
 323	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
 324
 325	/* If an error occurred during request building, there is no point in
 326	 * queueing the HCI command. We can simply return.
 327	 */
 328	if (req->err)
 329		return;
 330
 331	skb = hci_prepare_cmd(hdev, opcode, plen, param);
 332	if (!skb) {
 333		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
 334			   opcode);
 335		req->err = -ENOMEM;
 336		return;
 337	}
 338
 339	if (skb_queue_empty(&req->cmd_q))
 340		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
 341
 342	bt_cb(skb)->hci.req_event = event;
 343
 344	skb_queue_tail(&req->cmd_q, skb);
 345}
 346
 347void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
 348		 const void *param)
 349{
 350	hci_req_add_ev(req, opcode, plen, param, 0);
 351}
 352
 353void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
 354{
 355	struct hci_dev *hdev = req->hdev;
 356	struct hci_cp_write_page_scan_activity acp;
 357	u8 type;
 358
 359	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 360		return;
 361
 362	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 363		return;
 364
 365	if (enable) {
 366		type = PAGE_SCAN_TYPE_INTERLACED;
 367
 368		/* 160 msec page scan interval */
 369		acp.interval = cpu_to_le16(0x0100);
 370	} else {
 371		type = hdev->def_page_scan_type;
 372		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
 373	}
 374
 375	acp.window = cpu_to_le16(hdev->def_page_scan_window);
 376
 377	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
 378	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
 379		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
 380			    sizeof(acp), &acp);
 381
 382	if (hdev->page_scan_type != type)
 383		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 384}
 385
 386static void start_interleave_scan(struct hci_dev *hdev)
 387{
 388	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
 389	queue_delayed_work(hdev->req_workqueue,
 390			   &hdev->interleave_scan, 0);
 391}
 392
 393static bool is_interleave_scanning(struct hci_dev *hdev)
 394{
 395	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
 396}
 397
 398static void cancel_interleave_scan(struct hci_dev *hdev)
 399{
 400	bt_dev_dbg(hdev, "cancelling interleave scan");
 401
 402	cancel_delayed_work_sync(&hdev->interleave_scan);
 403
 404	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
 405}
 406
  407/* Return true if the interleave scan was newly started before exiting this
  408 * function, otherwise return false.
 409 */
 410static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
 411{
 412	/* Do interleaved scan only if all of the following are true:
 413	 * - There is at least one ADV monitor
 414	 * - At least one pending LE connection or one device to be scanned for
 415	 * - Monitor offloading is not supported
 416	 * If so, we should alternate between allowlist scan and one without
 417	 * any filters to save power.
 418	 */
 419	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
 420				!(list_empty(&hdev->pend_le_conns) &&
 421				  list_empty(&hdev->pend_le_reports)) &&
 422				hci_get_adv_monitor_offload_ext(hdev) ==
 423				    HCI_ADV_MONITOR_EXT_NONE;
 424	bool is_interleaving = is_interleave_scanning(hdev);
 425
 426	if (use_interleaving && !is_interleaving) {
 427		start_interleave_scan(hdev);
 428		bt_dev_dbg(hdev, "starting interleave scan");
 429		return true;
 430	}
 431
 432	if (!use_interleaving && is_interleaving)
 433		cancel_interleave_scan(hdev);
 434
 435	return false;
 436}
 437
 438/* This function controls the background scanning based on hdev->pend_le_conns
 439 * list. If there are pending LE connection we start the background scanning,
 440 * otherwise we stop it.
 441 *
 442 * This function requires the caller holds hdev->lock.
 443 */
 444static void __hci_update_background_scan(struct hci_request *req)
 445{
 446	struct hci_dev *hdev = req->hdev;
 447
 448	if (!test_bit(HCI_UP, &hdev->flags) ||
 449	    test_bit(HCI_INIT, &hdev->flags) ||
 450	    hci_dev_test_flag(hdev, HCI_SETUP) ||
 451	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 452	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
 453	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 454		return;
 455
 456	/* No point in doing scanning if LE support hasn't been enabled */
 457	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 458		return;
 459
 460	/* If discovery is active don't interfere with it */
 461	if (hdev->discovery.state != DISCOVERY_STOPPED)
 462		return;
 463
 464	/* Reset RSSI and UUID filters when starting background scanning
 465	 * since these filters are meant for service discovery only.
 466	 *
 467	 * The Start Discovery and Start Service Discovery operations
  468	 * ensure that proper values are set for the RSSI threshold and UUID
 469	 * filter list. So it is safe to just reset them here.
 470	 */
 471	hci_discovery_filter_clear(hdev);
 472
 473	bt_dev_dbg(hdev, "ADV monitoring is %s",
 474		   hci_is_adv_monitoring(hdev) ? "on" : "off");
 475
 476	if (list_empty(&hdev->pend_le_conns) &&
 477	    list_empty(&hdev->pend_le_reports) &&
 478	    !hci_is_adv_monitoring(hdev)) {
 479		/* If there is no pending LE connections or devices
 480		 * to be scanned for or no ADV monitors, we should stop the
 481		 * background scanning.
 482		 */
 483
 484		/* If controller is not scanning we are done. */
 485		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
 486			return;
 487
 488		hci_req_add_le_scan_disable(req, false);
 489
 490		bt_dev_dbg(hdev, "stopping background scanning");
 491	} else {
 492		/* If there is at least one pending LE connection, we should
 493		 * keep the background scan running.
 494		 */
 495
 496		/* If controller is connecting, we should not start scanning
 497		 * since some controllers are not able to scan and connect at
 498		 * the same time.
 499		 */
 500		if (hci_lookup_le_connect(hdev))
 501			return;
 502
 503		/* If controller is currently scanning, we stop it to ensure we
 504		 * don't miss any advertising (due to duplicates filter).
 505		 */
 506		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 507			hci_req_add_le_scan_disable(req, false);
 508
 509		hci_req_add_le_passive_scan(req);
 510		bt_dev_dbg(hdev, "starting background scanning");
 511	}
 512}
 513
 514void __hci_req_update_name(struct hci_request *req)
 515{
 516	struct hci_dev *hdev = req->hdev;
 517	struct hci_cp_write_local_name cp;
 518
 519	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
 520
 521	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 522}
 523
 524#define PNP_INFO_SVCLASS_ID		0x1200
 525
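/* EIR and advertising data consist of length-prefixed structures: each
 * entry is [length][type][data...], where [length] counts the type byte
 * plus the data. For example, a complete 16-bit UUID list containing only
 * UUID 0x110b is encoded as 03 03 0b 11 (length 3, EIR_UUID16_ALL, UUID in
 * little-endian order). The helpers below build such lists into the EIR
 * buffer.
 */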
 526static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 527{
 528	u8 *ptr = data, *uuids_start = NULL;
 529	struct bt_uuid *uuid;
 530
 531	if (len < 4)
 532		return ptr;
 533
 534	list_for_each_entry(uuid, &hdev->uuids, list) {
 535		u16 uuid16;
 536
 537		if (uuid->size != 16)
 538			continue;
 539
 540		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
 541		if (uuid16 < 0x1100)
 542			continue;
 543
 544		if (uuid16 == PNP_INFO_SVCLASS_ID)
 545			continue;
 546
 547		if (!uuids_start) {
 548			uuids_start = ptr;
 549			uuids_start[0] = 1;
 550			uuids_start[1] = EIR_UUID16_ALL;
 551			ptr += 2;
 552		}
 553
 554		/* Stop if not enough space to put next UUID */
 555		if ((ptr - data) + sizeof(u16) > len) {
 556			uuids_start[1] = EIR_UUID16_SOME;
 557			break;
 558		}
 559
 560		*ptr++ = (uuid16 & 0x00ff);
 561		*ptr++ = (uuid16 & 0xff00) >> 8;
 562		uuids_start[0] += sizeof(uuid16);
 563	}
 564
 565	return ptr;
 566}
 567
 568static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 569{
 570	u8 *ptr = data, *uuids_start = NULL;
 571	struct bt_uuid *uuid;
 572
 573	if (len < 6)
 574		return ptr;
 575
 576	list_for_each_entry(uuid, &hdev->uuids, list) {
 577		if (uuid->size != 32)
 578			continue;
 579
 580		if (!uuids_start) {
 581			uuids_start = ptr;
 582			uuids_start[0] = 1;
 583			uuids_start[1] = EIR_UUID32_ALL;
 584			ptr += 2;
 585		}
 586
 587		/* Stop if not enough space to put next UUID */
 588		if ((ptr - data) + sizeof(u32) > len) {
 589			uuids_start[1] = EIR_UUID32_SOME;
 590			break;
 591		}
 592
 593		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
 594		ptr += sizeof(u32);
 595		uuids_start[0] += sizeof(u32);
 596	}
 597
 598	return ptr;
 599}
 600
 601static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 602{
 603	u8 *ptr = data, *uuids_start = NULL;
 604	struct bt_uuid *uuid;
 605
 606	if (len < 18)
 607		return ptr;
 608
 609	list_for_each_entry(uuid, &hdev->uuids, list) {
 610		if (uuid->size != 128)
 611			continue;
 612
 613		if (!uuids_start) {
 614			uuids_start = ptr;
 615			uuids_start[0] = 1;
 616			uuids_start[1] = EIR_UUID128_ALL;
 617			ptr += 2;
 618		}
 619
 620		/* Stop if not enough space to put next UUID */
 621		if ((ptr - data) + 16 > len) {
 622			uuids_start[1] = EIR_UUID128_SOME;
 623			break;
 624		}
 625
 626		memcpy(ptr, uuid->uuid, 16);
 627		ptr += 16;
 628		uuids_start[0] += 16;
 629	}
 630
 631	return ptr;
 632}
 633
 634static void create_eir(struct hci_dev *hdev, u8 *data)
 635{
 636	u8 *ptr = data;
 637	size_t name_len;
 638
 639	name_len = strlen(hdev->dev_name);
 640
 641	if (name_len > 0) {
 642		/* EIR Data type */
 643		if (name_len > 48) {
 644			name_len = 48;
 645			ptr[1] = EIR_NAME_SHORT;
 646		} else
 647			ptr[1] = EIR_NAME_COMPLETE;
 648
 649		/* EIR Data length */
 650		ptr[0] = name_len + 1;
 651
 652		memcpy(ptr + 2, hdev->dev_name, name_len);
 653
 654		ptr += (name_len + 2);
 655	}
 656
 657	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
 658		ptr[0] = 2;
 659		ptr[1] = EIR_TX_POWER;
 660		ptr[2] = (u8) hdev->inq_tx_power;
 661
 662		ptr += 3;
 663	}
 664
 665	if (hdev->devid_source > 0) {
 666		ptr[0] = 9;
 667		ptr[1] = EIR_DEVICE_ID;
 668
 669		put_unaligned_le16(hdev->devid_source, ptr + 2);
 670		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
 671		put_unaligned_le16(hdev->devid_product, ptr + 6);
 672		put_unaligned_le16(hdev->devid_version, ptr + 8);
 673
 674		ptr += 10;
 675	}
 676
 677	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 678	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 679	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 680}
 681
 682void __hci_req_update_eir(struct hci_request *req)
 683{
 684	struct hci_dev *hdev = req->hdev;
 685	struct hci_cp_write_eir cp;
 686
 687	if (!hdev_is_powered(hdev))
 688		return;
 689
 690	if (!lmp_ext_inq_capable(hdev))
 691		return;
 692
 693	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
 694		return;
 695
 696	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
 697		return;
 698
 699	memset(&cp, 0, sizeof(cp));
 700
 701	create_eir(hdev, cp.data);
 702
 703	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
 704		return;
 705
 706	memcpy(hdev->eir, cp.data, sizeof(cp.data));
 707
 708	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 709}
 710
 711void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
 712{
 713	struct hci_dev *hdev = req->hdev;
 714
 715	if (hdev->scanning_paused) {
 716		bt_dev_dbg(hdev, "Scanning is paused for suspend");
 717		return;
 718	}
 719
 720	if (hdev->suspended)
 721		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
 722
 723	if (use_ext_scan(hdev)) {
 724		struct hci_cp_le_set_ext_scan_enable cp;
 725
 726		memset(&cp, 0, sizeof(cp));
 727		cp.enable = LE_SCAN_DISABLE;
 728		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
 729			    &cp);
 730	} else {
 731		struct hci_cp_le_set_scan_enable cp;
 732
 733		memset(&cp, 0, sizeof(cp));
 734		cp.enable = LE_SCAN_DISABLE;
 735		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 736	}
 737
 738	/* Disable address resolution */
 739	if (use_ll_privacy(hdev) &&
 740	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 741	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
 742		__u8 enable = 0x00;
 743
 744		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
 745	}
 746}
 747
 748static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
 749				 u8 bdaddr_type)
 750{
 751	struct hci_cp_le_del_from_accept_list cp;
 752
 753	cp.bdaddr_type = bdaddr_type;
 754	bacpy(&cp.bdaddr, bdaddr);
 755
 756	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
 757		   cp.bdaddr_type);
 758	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
 759
 760	if (use_ll_privacy(req->hdev) &&
 761	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
 762		struct smp_irk *irk;
 763
 764		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
 765		if (irk) {
 766			struct hci_cp_le_del_from_resolv_list cp;
 767
 768			cp.bdaddr_type = bdaddr_type;
 769			bacpy(&cp.bdaddr, bdaddr);
 770
 771			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
 772				    sizeof(cp), &cp);
 773		}
 774	}
 775}
 776
 777/* Adds connection to accept list if needed. On error, returns -1. */
 778static int add_to_accept_list(struct hci_request *req,
 779			      struct hci_conn_params *params, u8 *num_entries,
 780			      bool allow_rpa)
 781{
 782	struct hci_cp_le_add_to_accept_list cp;
 783	struct hci_dev *hdev = req->hdev;
 784
 785	/* Already in accept list */
 786	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
 787				   params->addr_type))
 788		return 0;
 789
 790	/* Select filter policy to accept all advertising */
 791	if (*num_entries >= hdev->le_accept_list_size)
 792		return -1;
 793
 794	/* Accept list can not be used with RPAs */
 795	if (!allow_rpa &&
 796	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 797	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
 798		return -1;
 799	}
 800
 801	/* During suspend, only wakeable devices can be in accept list */
 802	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
 803						   params->current_flags))
 804		return 0;
 805
 806	*num_entries += 1;
 807	cp.bdaddr_type = params->addr_type;
 808	bacpy(&cp.bdaddr, &params->addr);
 809
 810	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
 811		   cp.bdaddr_type);
 812	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
 813
 814	if (use_ll_privacy(hdev) &&
 815	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
 816		struct smp_irk *irk;
 817
 818		irk = hci_find_irk_by_addr(hdev, &params->addr,
 819					   params->addr_type);
 820		if (irk) {
 821			struct hci_cp_le_add_to_resolv_list cp;
 822
 823			cp.bdaddr_type = params->addr_type;
 824			bacpy(&cp.bdaddr, &params->addr);
 825			memcpy(cp.peer_irk, irk->val, 16);
 826
 827			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
 828				memcpy(cp.local_irk, hdev->irk, 16);
 829			else
 830				memset(cp.local_irk, 0, 16);
 831
 832			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
 833				    sizeof(cp), &cp);
 834		}
 835	}
 836
 837	return 0;
 838}
 839
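/* Returns the scan filter policy to use: 0x01 when the controller accept
 * list could be synced with the pending connection and report lists, or
 * 0x00 when the accept list cannot be used (not enough entries, RPAs
 * without LL privacy support, or an ADV monitor that needs unfiltered
 * results).
 */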
 840static u8 update_accept_list(struct hci_request *req)
 841{
 842	struct hci_dev *hdev = req->hdev;
 843	struct hci_conn_params *params;
 844	struct bdaddr_list *b;
 845	u8 num_entries = 0;
 846	bool pend_conn, pend_report;
 847	/* We allow usage of accept list even with RPAs in suspend. In the worst
  848	 * case, we won't be able to wake from devices that use the privacy 1.2
  849	 * features. Additionally, once we support privacy 1.2 and IRK
 850	 * offloading, we can update this to also check for those conditions.
 851	 */
 852	bool allow_rpa = hdev->suspended;
 853
 854	if (use_ll_privacy(hdev) &&
 855	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
 856		allow_rpa = true;
 857
 858	/* Go through the current accept list programmed into the
 859	 * controller one by one and check if that address is still
 860	 * in the list of pending connections or list of devices to
 861	 * report. If not present in either list, then queue the
 862	 * command to remove it from the controller.
 863	 */
 864	list_for_each_entry(b, &hdev->le_accept_list, list) {
 865		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
 866						      &b->bdaddr,
 867						      b->bdaddr_type);
 868		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
 869							&b->bdaddr,
 870							b->bdaddr_type);
 871
 872		/* If the device is not likely to connect or report,
 873		 * remove it from the accept list.
 874		 */
 875		if (!pend_conn && !pend_report) {
 876			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
 877			continue;
 878		}
 879
 880		/* Accept list can not be used with RPAs */
 881		if (!allow_rpa &&
 882		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 883		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
 884			return 0x00;
 885		}
 886
 887		num_entries++;
 888	}
 889
 890	/* Since all no longer valid accept list entries have been
 891	 * removed, walk through the list of pending connections
 892	 * and ensure that any new device gets programmed into
 893	 * the controller.
 894	 *
  895	 * If the list of devices is larger than the number of
  896	 * available accept list entries in the controller, then
  897	 * just abort and return a filter policy value that does not use the
 898	 * accept list.
 899	 */
 900	list_for_each_entry(params, &hdev->pend_le_conns, action) {
 901		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
 902			return 0x00;
 903	}
 904
 905	/* After adding all new pending connections, walk through
 906	 * the list of pending reports and also add these to the
 907	 * accept list if there is still space. Abort if space runs out.
 908	 */
 909	list_for_each_entry(params, &hdev->pend_le_reports, action) {
 910		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
 911			return 0x00;
 912	}
 913
 914	/* Use the allowlist unless the following conditions are all true:
 915	 * - We are not currently suspending
 916	 * - There are 1 or more ADV monitors registered and it's not offloaded
 917	 * - Interleaved scanning is not currently using the allowlist
 918	 */
 919	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
 920	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
 921	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
 922		return 0x00;
 923
 924	/* Select filter policy to use accept list */
 925	return 0x01;
 926}
 927
 928static bool scan_use_rpa(struct hci_dev *hdev)
 929{
 930	return hci_dev_test_flag(hdev, HCI_PRIVACY);
 931}
 932
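/* Queue the LE scan parameter and scan enable commands, using the extended
 * variants when the controller supports extended scanning. The interval and
 * window arguments are in units of 0.625 ms, as defined by the HCI
 * specification.
 */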
 933static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
 934			       u16 window, u8 own_addr_type, u8 filter_policy,
 935			       bool filter_dup, bool addr_resolv)
 936{
 937	struct hci_dev *hdev = req->hdev;
 938
 939	if (hdev->scanning_paused) {
 940		bt_dev_dbg(hdev, "Scanning is paused for suspend");
 941		return;
 942	}
 943
 944	if (use_ll_privacy(hdev) &&
 945	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 946	    addr_resolv) {
 947		u8 enable = 0x01;
 948
 949		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
 950	}
 951
  952	/* Use extended scanning if the LE Set Extended Scan Parameters and
  953	 * LE Set Extended Scan Enable commands are supported
 954	 */
 955	if (use_ext_scan(hdev)) {
 956		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
 957		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
 958		struct hci_cp_le_scan_phy_params *phy_params;
 959		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
 960		u32 plen;
 961
 962		ext_param_cp = (void *)data;
 963		phy_params = (void *)ext_param_cp->data;
 964
 965		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
 966		ext_param_cp->own_addr_type = own_addr_type;
 967		ext_param_cp->filter_policy = filter_policy;
 968
 969		plen = sizeof(*ext_param_cp);
 970
 971		if (scan_1m(hdev) || scan_2m(hdev)) {
 972			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
 973
 974			memset(phy_params, 0, sizeof(*phy_params));
 975			phy_params->type = type;
 976			phy_params->interval = cpu_to_le16(interval);
 977			phy_params->window = cpu_to_le16(window);
 978
 979			plen += sizeof(*phy_params);
 980			phy_params++;
 981		}
 982
 983		if (scan_coded(hdev)) {
 984			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
 985
 986			memset(phy_params, 0, sizeof(*phy_params));
 987			phy_params->type = type;
 988			phy_params->interval = cpu_to_le16(interval);
 989			phy_params->window = cpu_to_le16(window);
 990
 991			plen += sizeof(*phy_params);
 992			phy_params++;
 993		}
 994
 995		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
 996			    plen, ext_param_cp);
 997
 998		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
 999		ext_enable_cp.enable = LE_SCAN_ENABLE;
1000		ext_enable_cp.filter_dup = filter_dup;
1001
1002		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1003			    sizeof(ext_enable_cp), &ext_enable_cp);
1004	} else {
1005		struct hci_cp_le_set_scan_param param_cp;
1006		struct hci_cp_le_set_scan_enable enable_cp;
1007
1008		memset(&param_cp, 0, sizeof(param_cp));
1009		param_cp.type = type;
1010		param_cp.interval = cpu_to_le16(interval);
1011		param_cp.window = cpu_to_le16(window);
1012		param_cp.own_address_type = own_addr_type;
1013		param_cp.filter_policy = filter_policy;
1014		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1015			    &param_cp);
1016
1017		memset(&enable_cp, 0, sizeof(enable_cp));
1018		enable_cp.enable = LE_SCAN_ENABLE;
1019		enable_cp.filter_dup = filter_dup;
1020		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1021			    &enable_cp);
1022	}
1023}
1024
 1025/* Returns true if an LE connection is in the scanning state */
1026static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1027{
1028	struct hci_conn_hash *h = &hdev->conn_hash;
1029	struct hci_conn  *c;
1030
1031	rcu_read_lock();
1032
1033	list_for_each_entry_rcu(c, &h->list, list) {
1034		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1035		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
1036			rcu_read_unlock();
1037			return true;
1038		}
1039	}
1040
1041	rcu_read_unlock();
1042
1043	return false;
1044}
1045
1046/* Ensure to call hci_req_add_le_scan_disable() first to disable the
1047 * controller based address resolution to be able to reconfigure
1048 * resolving list.
1049 */
1050void hci_req_add_le_passive_scan(struct hci_request *req)
1051{
1052	struct hci_dev *hdev = req->hdev;
1053	u8 own_addr_type;
1054	u8 filter_policy;
1055	u16 window, interval;
1056	/* Default is to enable duplicates filter */
1057	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1058	/* Background scanning should run with address resolution */
1059	bool addr_resolv = true;
1060
1061	if (hdev->scanning_paused) {
1062		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1063		return;
1064	}
1065
 1066	/* Set require_privacy to false since no SCAN_REQ are sent
 1067	 * during passive scanning. Not using a non-resolvable address
1068	 * here is important so that peer devices using direct
1069	 * advertising with our address will be correctly reported
1070	 * by the controller.
1071	 */
1072	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1073				      &own_addr_type))
1074		return;
1075
1076	if (hdev->enable_advmon_interleave_scan &&
1077	    __hci_update_interleaved_scan(hdev))
1078		return;
1079
1080	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1081	/* Adding or removing entries from the accept list must
1082	 * happen before enabling scanning. The controller does
1083	 * not allow accept list modification while scanning.
1084	 */
1085	filter_policy = update_accept_list(req);
1086
 1087	/* When the controller is using random resolvable addresses and
 1088	 * thus has LE privacy enabled, controllers that support
 1089	 * Extended Scanner Filter Policies can also enable support
 1090	 * for handling directed advertising.
1091	 *
 1092	 * So instead of using filter policies 0x00 (no accept list)
1093	 * and 0x01 (accept list enabled) use the new filter policies
1094	 * 0x02 (no accept list) and 0x03 (accept list enabled).
1095	 */
1096	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1097	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1098		filter_policy |= 0x02;
1099
1100	if (hdev->suspended) {
1101		window = hdev->le_scan_window_suspend;
1102		interval = hdev->le_scan_int_suspend;
1103
1104		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1105	} else if (hci_is_le_conn_scanning(hdev)) {
1106		window = hdev->le_scan_window_connect;
1107		interval = hdev->le_scan_int_connect;
1108	} else if (hci_is_adv_monitoring(hdev)) {
1109		window = hdev->le_scan_window_adv_monitor;
1110		interval = hdev->le_scan_int_adv_monitor;
1111
1112		/* Disable duplicates filter when scanning for advertisement
1113		 * monitor for the following reasons.
1114		 *
1115		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
1116		 * controllers ignore RSSI_Sampling_Period when the duplicates
1117		 * filter is enabled.
1118		 *
1119		 * For SW pattern filtering, when we're not doing interleaved
1120		 * scanning, it is necessary to disable duplicates filter,
1121		 * otherwise hosts can only receive one advertisement and it's
1122		 * impossible to know if a peer is still in range.
1123		 */
1124		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
1125	} else {
1126		window = hdev->le_scan_window;
1127		interval = hdev->le_scan_interval;
1128	}
1129
1130	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1131		   filter_policy);
1132	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1133			   own_addr_type, filter_policy, filter_dup,
1134			   addr_resolv);
1135}
1136
1137static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1138{
1139	struct adv_info *adv_instance;
1140
1141	/* Instance 0x00 always set local name */
1142	if (instance == 0x00)
1143		return true;
1144
1145	adv_instance = hci_find_adv_instance(hdev, instance);
1146	if (!adv_instance)
1147		return false;
1148
1149	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1150	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1151		return true;
1152
1153	return adv_instance->scan_rsp_len ? true : false;
1154}
1155
1156static void hci_req_clear_event_filter(struct hci_request *req)
1157{
1158	struct hci_cp_set_event_filter f;
1159
1160	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1161		return;
1162
1163	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1164		memset(&f, 0, sizeof(f));
1165		f.flt_type = HCI_FLT_CLEAR_ALL;
1166		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1167	}
1168}
1169
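/* Program a Connection Setup event filter for every accept list device
 * that is marked as wakeable, and toggle page scan so that only those
 * devices can reach us while suspended.
 */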
1170static void hci_req_set_event_filter(struct hci_request *req)
1171{
1172	struct bdaddr_list_with_flags *b;
1173	struct hci_cp_set_event_filter f;
1174	struct hci_dev *hdev = req->hdev;
1175	u8 scan = SCAN_DISABLED;
1176	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1177
1178	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1179		return;
1180
1181	/* Always clear event filter when starting */
1182	hci_req_clear_event_filter(req);
1183
1184	list_for_each_entry(b, &hdev->accept_list, list) {
1185		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1186					b->current_flags))
1187			continue;
1188
1189		memset(&f, 0, sizeof(f));
1190		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1191		f.flt_type = HCI_FLT_CONN_SETUP;
1192		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1193		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1194
1195		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1196		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1197		scan = SCAN_PAGE;
1198	}
1199
1200	if (scan && !scanning) {
1201		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1202		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1203	} else if (!scan && scanning) {
1204		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1205		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1206	}
1207}
1208
1209static void cancel_adv_timeout(struct hci_dev *hdev)
1210{
1211	if (hdev->adv_instance_timeout) {
1212		hdev->adv_instance_timeout = 0;
1213		cancel_delayed_work(&hdev->adv_instance_expire);
1214	}
1215}
1216
1217/* This function requires the caller holds hdev->lock */
1218void __hci_req_pause_adv_instances(struct hci_request *req)
1219{
1220	bt_dev_dbg(req->hdev, "Pausing advertising instances");
1221
1222	/* Call to disable any advertisements active on the controller.
1223	 * This will succeed even if no advertisements are configured.
1224	 */
1225	__hci_req_disable_advertising(req);
1226
1227	/* If we are using software rotation, pause the loop */
1228	if (!ext_adv_capable(req->hdev))
1229		cancel_adv_timeout(req->hdev);
1230}
1231
1232/* This function requires the caller holds hdev->lock */
1233static void __hci_req_resume_adv_instances(struct hci_request *req)
1234{
1235	struct adv_info *adv;
1236
1237	bt_dev_dbg(req->hdev, "Resuming advertising instances");
1238
1239	if (ext_adv_capable(req->hdev)) {
1240		/* Call for each tracked instance to be re-enabled */
1241		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1242			__hci_req_enable_ext_advertising(req,
1243							 adv->instance);
1244		}
1245
1246	} else {
1247		/* Schedule for most recent instance to be restarted and begin
1248		 * the software rotation loop
1249		 */
1250		__hci_req_schedule_adv_instance(req,
1251						req->hdev->cur_adv_instance,
1252						true);
1253	}
1254}
1255
1256/* This function requires the caller holds hdev->lock */
1257int hci_req_resume_adv_instances(struct hci_dev *hdev)
1258{
1259	struct hci_request req;
1260
1261	hci_req_init(&req, hdev);
1262	__hci_req_resume_adv_instances(&req);
1263
1264	return hci_req_run(&req, NULL);
1265}
1266
1267static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1268{
1269	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1270		   status);
1271	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1272	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1273		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1274		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1275		wake_up(&hdev->suspend_wait_q);
1276	}
1277
1278	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1279		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1280		wake_up(&hdev->suspend_wait_q);
1281	}
1282}
1283
1284static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1285					      bool enable)
1286{
1287	struct hci_dev *hdev = req->hdev;
1288
1289	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1290	case HCI_ADV_MONITOR_EXT_MSFT:
1291		msft_req_add_set_filter_enable(req, enable);
1292		break;
1293	default:
1294		return;
1295	}
1296
1297	/* No need to block when enabling since it's on resume path */
1298	if (hdev->suspended && !enable)
1299		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1300}
1301
1302/* Call with hci_dev_lock */
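/* Drive the controller through the suspend states: on BT_SUSPEND_DISCONNECT
 * discovery, advertising and scanning are paused and all connections are
 * torn down; on BT_SUSPEND_CONFIGURE_WAKE event filters and passive
 * scanning are set up so that only wakeable devices can reach us; any other
 * state restores normal operation.
 */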
1303void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1304{
1305	int old_state;
1306	struct hci_conn *conn;
1307	struct hci_request req;
1308	u8 page_scan;
1309	int disconnect_counter;
1310
1311	if (next == hdev->suspend_state) {
1312		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1313		goto done;
1314	}
1315
1316	hdev->suspend_state = next;
1317	hci_req_init(&req, hdev);
1318
1319	if (next == BT_SUSPEND_DISCONNECT) {
1320		/* Mark device as suspended */
1321		hdev->suspended = true;
1322
1323		/* Pause discovery if not already stopped */
1324		old_state = hdev->discovery.state;
1325		if (old_state != DISCOVERY_STOPPED) {
1326			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1327			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1328			queue_work(hdev->req_workqueue, &hdev->discov_update);
1329		}
1330
1331		hdev->discovery_paused = true;
1332		hdev->discovery_old_state = old_state;
1333
1334		/* Stop directed advertising */
1335		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1336		if (old_state) {
1337			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1338			cancel_delayed_work(&hdev->discov_off);
1339			queue_delayed_work(hdev->req_workqueue,
1340					   &hdev->discov_off, 0);
1341		}
1342
1343		/* Pause other advertisements */
1344		if (hdev->adv_instance_cnt)
1345			__hci_req_pause_adv_instances(&req);
1346
1347		hdev->advertising_paused = true;
1348		hdev->advertising_old_state = old_state;
1349
1350		/* Disable page scan if enabled */
1351		if (test_bit(HCI_PSCAN, &hdev->flags)) {
1352			page_scan = SCAN_DISABLED;
1353			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1354				    &page_scan);
1355			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1356		}
1357
1358		/* Disable LE passive scan if enabled */
1359		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1360			cancel_interleave_scan(hdev);
1361			hci_req_add_le_scan_disable(&req, false);
1362		}
1363
1364		/* Disable advertisement filters */
1365		hci_req_add_set_adv_filter_enable(&req, false);
1366
1367		/* Prevent disconnects from causing scanning to be re-enabled */
1368		hdev->scanning_paused = true;
1369
1370		/* Run commands before disconnecting */
1371		hci_req_run(&req, suspend_req_complete);
1372
1373		disconnect_counter = 0;
1374		/* Soft disconnect everything (power off) */
1375		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1376			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1377			disconnect_counter++;
1378		}
1379
1380		if (disconnect_counter > 0) {
1381			bt_dev_dbg(hdev,
1382				   "Had %d disconnects. Will wait on them",
1383				   disconnect_counter);
1384			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1385		}
1386	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1387		/* Unpause to take care of updating scanning params */
1388		hdev->scanning_paused = false;
1389		/* Enable event filter for paired devices */
1390		hci_req_set_event_filter(&req);
1391		/* Enable passive scan at lower duty cycle */
1392		__hci_update_background_scan(&req);
1393		/* Pause scan changes again. */
1394		hdev->scanning_paused = true;
1395		hci_req_run(&req, suspend_req_complete);
1396	} else {
1397		hdev->suspended = false;
1398		hdev->scanning_paused = false;
1399
1400		/* Clear any event filters and restore scan state */
1401		hci_req_clear_event_filter(&req);
1402		__hci_req_update_scan(&req);
1403
1404		/* Reset passive/background scanning to normal */
1405		__hci_update_background_scan(&req);
1406		/* Enable all of the advertisement filters */
1407		hci_req_add_set_adv_filter_enable(&req, true);
1408
1409		/* Unpause directed advertising */
1410		hdev->advertising_paused = false;
1411		if (hdev->advertising_old_state) {
1412			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1413				hdev->suspend_tasks);
1414			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1415			queue_work(hdev->req_workqueue,
1416				   &hdev->discoverable_update);
1417			hdev->advertising_old_state = 0;
1418		}
1419
1420		/* Resume other advertisements */
1421		if (hdev->adv_instance_cnt)
1422			__hci_req_resume_adv_instances(&req);
1423
1424		/* Unpause discovery */
1425		hdev->discovery_paused = false;
1426		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1427		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1428			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1429			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1430			queue_work(hdev->req_workqueue, &hdev->discov_update);
1431		}
1432
1433		hci_req_run(&req, suspend_req_complete);
1434	}
1435
1436	hdev->suspend_state = next;
1437
1438done:
1439	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1440	wake_up(&hdev->suspend_wait_q);
1441}
1442
1443static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1444{
1445	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1446}
1447
1448void __hci_req_disable_advertising(struct hci_request *req)
1449{
1450	if (ext_adv_capable(req->hdev)) {
1451		__hci_req_disable_ext_adv_instance(req, 0x00);
1452
1453	} else {
1454		u8 enable = 0x00;
1455
1456		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1457	}
1458}
1459
1460static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1461{
1462	u32 flags;
1463	struct adv_info *adv_instance;
1464
1465	if (instance == 0x00) {
1466		/* Instance 0 always manages the "Tx Power" and "Flags"
1467		 * fields
1468		 */
1469		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1470
1471		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1472		 * corresponds to the "connectable" instance flag.
1473		 */
1474		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1475			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1476
1477		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1478			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1479		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1480			flags |= MGMT_ADV_FLAG_DISCOV;
1481
1482		return flags;
1483	}
1484
1485	adv_instance = hci_find_adv_instance(hdev, instance);
1486
1487	/* Return 0 when we got an invalid instance identifier. */
1488	if (!adv_instance)
1489		return 0;
1490
1491	return adv_instance->flags;
1492}
1493
1494static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1495{
1496	/* If privacy is not enabled don't use RPA */
1497	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1498		return false;
1499
1500	/* If basic privacy mode is enabled use RPA */
1501	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1502		return true;
1503
1504	/* If limited privacy mode is enabled don't use RPA if we're
1505	 * both discoverable and bondable.
1506	 */
1507	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1508	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1509		return false;
1510
1511	/* We're neither bondable nor discoverable in the limited
1512	 * privacy mode, therefore use RPA.
1513	 */
1514	return true;
1515}
1516
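/* Note on the le_states checks below: hdev->le_states holds the "LE
 * Supported States" bitmask reported by the controller, and the bit numbers
 * mentioned in the comments are state-combination bits from the Core
 * specification, tested byte-wise. For example, bit 38 lives in
 * le_states[38 / 8] = le_states[4] with mask 1 << (38 % 8) = 0x40, and
 * bit 21 in le_states[2] with mask 0x20.
 */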
1517static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1518{
1519	/* If there is no connection we are OK to advertise. */
1520	if (hci_conn_num(hdev, LE_LINK) == 0)
1521		return true;
1522
1523	/* Check le_states if there is any connection in peripheral role. */
1524	if (hdev->conn_hash.le_num_peripheral > 0) {
1525		/* Peripheral connection state and non connectable mode bit 20.
1526		 */
1527		if (!connectable && !(hdev->le_states[2] & 0x10))
1528			return false;
1529
1530		/* Peripheral connection state and connectable mode bit 38
1531		 * and scannable bit 21.
1532		 */
1533		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1534				    !(hdev->le_states[2] & 0x20)))
1535			return false;
1536	}
1537
1538	/* Check le_states if there is any connection in central role. */
1539	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1540		/* Central connection state and non connectable mode bit 18. */
1541		if (!connectable && !(hdev->le_states[2] & 0x02))
1542			return false;
1543
1544		/* Central connection state and connectable mode bit 35 and
1545		 * scannable 19.
1546		 */
1547		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1548				    !(hdev->le_states[2] & 0x08)))
1549			return false;
1550	}
1551
1552	return true;
1553}
1554
1555void __hci_req_enable_advertising(struct hci_request *req)
1556{
1557	struct hci_dev *hdev = req->hdev;
1558	struct adv_info *adv_instance;
1559	struct hci_cp_le_set_adv_param cp;
1560	u8 own_addr_type, enable = 0x01;
1561	bool connectable;
1562	u16 adv_min_interval, adv_max_interval;
1563	u32 flags;
1564
1565	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1566	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1567
1568	/* If the "connectable" instance flag was not set, then choose between
1569	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1570	 */
1571	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1572		      mgmt_get_connectable(hdev);
1573
1574	if (!is_advertising_allowed(hdev, connectable))
1575		return;
1576
1577	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1578		__hci_req_disable_advertising(req);
1579
1580	/* Clear the HCI_LE_ADV bit temporarily so that the
1581	 * hci_update_random_address knows that it's safe to go ahead
1582	 * and write a new random address. The flag will be set back on
1583	 * as soon as the SET_ADV_ENABLE HCI command completes.
1584	 */
1585	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1586
1587	/* Set require_privacy to true only when non-connectable
1588	 * advertising is used. In that case it is fine to use a
1589	 * non-resolvable private address.
1590	 */
1591	if (hci_update_random_address(req, !connectable,
1592				      adv_use_rpa(hdev, flags),
1593				      &own_addr_type) < 0)
1594		return;
1595
1596	memset(&cp, 0, sizeof(cp));
1597
1598	if (adv_instance) {
1599		adv_min_interval = adv_instance->min_interval;
1600		adv_max_interval = adv_instance->max_interval;
1601	} else {
1602		adv_min_interval = hdev->le_adv_min_interval;
1603		adv_max_interval = hdev->le_adv_max_interval;
1604	}
1605
1606	if (connectable) {
1607		cp.type = LE_ADV_IND;
1608	} else {
1609		if (adv_cur_instance_is_scannable(hdev))
1610			cp.type = LE_ADV_SCAN_IND;
1611		else
1612			cp.type = LE_ADV_NONCONN_IND;
1613
1614		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1615		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1616			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1617			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1618		}
1619	}
1620
1621	cp.min_interval = cpu_to_le16(adv_min_interval);
1622	cp.max_interval = cpu_to_le16(adv_max_interval);
1623	cp.own_address_type = own_addr_type;
1624	cp.channel_map = hdev->le_adv_channel_map;
1625
1626	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1627
1628	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1629}
1630
1631u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1632{
1633	size_t short_len;
1634	size_t complete_len;
1635
1636	/* no space left for name (+ NULL + type + len) */
1637	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1638		return ad_len;
1639
1640	/* use complete name if present and fits */
1641	complete_len = strlen(hdev->dev_name);
1642	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1643		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1644				       hdev->dev_name, complete_len + 1);
1645
1646	/* use short name if present */
1647	short_len = strlen(hdev->short_name);
1648	if (short_len)
1649		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1650				       hdev->short_name, short_len + 1);
1651
1652	/* use shortened full name if present, we already know that name
1653	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1654	 */
1655	if (complete_len) {
1656		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1657
1658		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1659		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1660
1661		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1662				       sizeof(name));
1663	}
1664
1665	return ad_len;
1666}
1667
1668static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1669{
1670	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1671}
1672
1673static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1674{
1675	u8 scan_rsp_len = 0;
1676
1677	if (hdev->appearance)
1678		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1679
1680	return append_local_name(hdev, ptr, scan_rsp_len);
1681}
1682
1683static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1684					u8 *ptr)
1685{
1686	struct adv_info *adv_instance;
1687	u32 instance_flags;
1688	u8 scan_rsp_len = 0;
1689
1690	adv_instance = hci_find_adv_instance(hdev, instance);
1691	if (!adv_instance)
1692		return 0;
1693
1694	instance_flags = adv_instance->flags;
1695
1696	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1697		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1698
1699	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1700	       adv_instance->scan_rsp_len);
1701
1702	scan_rsp_len += adv_instance->scan_rsp_len;
1703
1704	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1705		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1706
1707	return scan_rsp_len;
1708}
1709
1710void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1711{
1712	struct hci_dev *hdev = req->hdev;
1713	u8 len;
1714
1715	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1716		return;
1717
1718	if (ext_adv_capable(hdev)) {
1719		struct {
1720			struct hci_cp_le_set_ext_scan_rsp_data cp;
1721			u8 data[HCI_MAX_EXT_AD_LENGTH];
1722		} pdu;
1723
1724		memset(&pdu, 0, sizeof(pdu));
1725
1726		if (instance)
1727			len = create_instance_scan_rsp_data(hdev, instance,
1728							    pdu.data);
1729		else
1730			len = create_default_scan_rsp_data(hdev, pdu.data);
1731
1732		if (hdev->scan_rsp_data_len == len &&
1733		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
1734			return;
1735
1736		memcpy(hdev->scan_rsp_data, pdu.data, len);
1737		hdev->scan_rsp_data_len = len;
1738
1739		pdu.cp.handle = instance;
1740		pdu.cp.length = len;
1741		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1742		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1743
1744		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1745			    sizeof(pdu.cp) + len, &pdu.cp);
1746	} else {
1747		struct hci_cp_le_set_scan_rsp_data cp;
1748
1749		memset(&cp, 0, sizeof(cp));
1750
1751		if (instance)
1752			len = create_instance_scan_rsp_data(hdev, instance,
1753							    cp.data);
1754		else
1755			len = create_default_scan_rsp_data(hdev, cp.data);
1756
1757		if (hdev->scan_rsp_data_len == len &&
1758		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1759			return;
1760
1761		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1762		hdev->scan_rsp_data_len = len;
1763
1764		cp.length = len;
1765
1766		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1767	}
1768}
1769
1770static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1771{
1772	struct adv_info *adv_instance = NULL;
1773	u8 ad_len = 0, flags = 0;
1774	u32 instance_flags;
1775
1776	/* Return 0 when the current instance identifier is invalid. */
1777	if (instance) {
1778		adv_instance = hci_find_adv_instance(hdev, instance);
1779		if (!adv_instance)
1780			return 0;
1781	}
1782
1783	instance_flags = get_adv_instance_flags(hdev, instance);
1784
1785	/* If instance already has the flags set skip adding it once
1786	 * again.
1787	 */
1788	if (adv_instance && eir_get_data(adv_instance->adv_data,
1789					 adv_instance->adv_data_len, EIR_FLAGS,
1790					 NULL))
1791		goto skip_flags;
1792
1793	/* The Add Advertising command allows userspace to set both the general
1794	 * and limited discoverable flags.
1795	 */
1796	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1797		flags |= LE_AD_GENERAL;
1798
1799	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1800		flags |= LE_AD_LIMITED;
1801
1802	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1803		flags |= LE_AD_NO_BREDR;
1804
1805	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1806		/* If a discovery flag wasn't provided, simply use the global
1807		 * settings.
1808		 */
1809		if (!flags)
1810			flags |= mgmt_get_adv_discov_flags(hdev);
1811
1812		/* If flags would still be empty, then there is no need to
1813		 * include the "Flags" AD field.
1814		 */
1815		if (flags) {
1816			ptr[0] = 0x02;
1817			ptr[1] = EIR_FLAGS;
1818			ptr[2] = flags;
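			/* Illustrative note: each AD structure is <length,
			 * type, value>, so the length 0x02 covers the type
			 * byte plus the single flags byte; a typical encoding
			 * is 02 01 06 (EIR_FLAGS, LE_AD_GENERAL | LE_AD_NO_BREDR).
			 */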
1819
1820			ad_len += 3;
1821			ptr += 3;
1822		}
1823	}
1824
1825skip_flags:
1826	if (adv_instance) {
1827		memcpy(ptr, adv_instance->adv_data,
1828		       adv_instance->adv_data_len);
1829		ad_len += adv_instance->adv_data_len;
1830		ptr += adv_instance->adv_data_len;
1831	}
1832
1833	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1834		s8 adv_tx_power;
1835
1836		if (ext_adv_capable(hdev)) {
1837			if (adv_instance)
1838				adv_tx_power = adv_instance->tx_power;
1839			else
1840				adv_tx_power = hdev->adv_tx_power;
1841		} else {
1842			adv_tx_power = hdev->adv_tx_power;
1843		}
1844
1845		/* Provide Tx Power only if we can provide a valid value for it */
1846		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1847			ptr[0] = 0x02;
1848			ptr[1] = EIR_TX_POWER;
1849			ptr[2] = (u8)adv_tx_power;
1850
1851			ad_len += 3;
1852			ptr += 3;
1853		}
1854	}
1855
1856	return ad_len;
1857}
1858
1859void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1860{
1861	struct hci_dev *hdev = req->hdev;
1862	u8 len;
1863
1864	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1865		return;
1866
1867	if (ext_adv_capable(hdev)) {
1868		struct {
1869			struct hci_cp_le_set_ext_adv_data cp;
1870			u8 data[HCI_MAX_EXT_AD_LENGTH];
1871		} pdu;
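		/* Note: the extended command takes a variable-length payload,
		 * so the fixed header and the advertising data share one
		 * buffer and only sizeof(pdu.cp) + len bytes are queued below.
		 */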
1872
1873		memset(&pdu, 0, sizeof(pdu));
1874
1875		len = create_instance_adv_data(hdev, instance, pdu.data);
1876
1877		/* There's nothing to do if the data hasn't changed */
1878		if (hdev->adv_data_len == len &&
1879		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1880			return;
1881
1882		memcpy(hdev->adv_data, pdu.data, len);
1883		hdev->adv_data_len = len;
1884
1885		pdu.cp.length = len;
1886		pdu.cp.handle = instance;
1887		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1888		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1889
1890		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1891			    sizeof(pdu.cp) + len, &pdu.cp);
1892	} else {
1893		struct hci_cp_le_set_adv_data cp;
1894
1895		memset(&cp, 0, sizeof(cp));
1896
1897		len = create_instance_adv_data(hdev, instance, cp.data);
1898
1899		/* There's nothing to do if the data hasn't changed */
1900		if (hdev->adv_data_len == len &&
1901		    memcmp(cp.data, hdev->adv_data, len) == 0)
1902			return;
1903
1904		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1905		hdev->adv_data_len = len;
1906
1907		cp.length = len;
1908
1909		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1910	}
1911}
1912
1913int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1914{
1915	struct hci_request req;
1916
1917	hci_req_init(&req, hdev);
1918	__hci_req_update_adv_data(&req, instance);
1919
1920	return hci_req_run(&req, NULL);
1921}
1922
1923static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1924					    u16 opcode)
1925{
1926	BT_DBG("%s status %u", hdev->name, status);
1927}
1928
1929void hci_req_disable_address_resolution(struct hci_dev *hdev)
1930{
1931	struct hci_request req;
1932	__u8 enable = 0x00;
1933
1934	if (!use_ll_privacy(hdev) &&
1935	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1936		return;
1937
1938	hci_req_init(&req, hdev);
1939
1940	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1941
1942	hci_req_run(&req, enable_addr_resolution_complete);
1943}
1944
1945static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1946{
1947	bt_dev_dbg(hdev, "status %u", status);
1948}
1949
1950void hci_req_reenable_advertising(struct hci_dev *hdev)
1951{
1952	struct hci_request req;
1953
1954	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1955	    list_empty(&hdev->adv_instances))
1956		return;
1957
1958	hci_req_init(&req, hdev);
1959
1960	if (hdev->cur_adv_instance) {
1961		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1962						true);
1963	} else {
1964		if (ext_adv_capable(hdev)) {
1965			__hci_req_start_ext_adv(&req, 0x00);
1966		} else {
1967			__hci_req_update_adv_data(&req, 0x00);
1968			__hci_req_update_scan_rsp_data(&req, 0x00);
1969			__hci_req_enable_advertising(&req);
1970		}
1971	}
1972
1973	hci_req_run(&req, adv_enable_complete);
1974}
1975
1976static void adv_timeout_expire(struct work_struct *work)
1977{
1978	struct hci_dev *hdev = container_of(work, struct hci_dev,
1979					    adv_instance_expire.work);
1980
1981	struct hci_request req;
1982	u8 instance;
1983
1984	bt_dev_dbg(hdev, "");
1985
1986	hci_dev_lock(hdev);
1987
1988	hdev->adv_instance_timeout = 0;
1989
1990	instance = hdev->cur_adv_instance;
1991	if (instance == 0x00)
1992		goto unlock;
1993
1994	hci_req_init(&req, hdev);
1995
1996	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1997
1998	if (list_empty(&hdev->adv_instances))
1999		__hci_req_disable_advertising(&req);
2000
2001	hci_req_run(&req, NULL);
2002
2003unlock:
2004	hci_dev_unlock(hdev);
2005}
2006
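/* Interleaved scanning alternates the LE passive scan between an accept-list
 * filtered window and an unfiltered window so that advertisement monitors can
 * also see devices that are not on the accept list. interleave_scan_work()
 * below re-queues itself with either advmon_allowlist_duration or
 * advmon_no_filter_duration (in milliseconds) depending on the current
 * interleave_scan_state.
 */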
2007static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2008					   unsigned long opt)
2009{
2010	struct hci_dev *hdev = req->hdev;
2011	int ret = 0;
2012
2013	hci_dev_lock(hdev);
2014
2015	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2016		hci_req_add_le_scan_disable(req, false);
2017	hci_req_add_le_passive_scan(req);
2018
2019	switch (hdev->interleave_scan_state) {
2020	case INTERLEAVE_SCAN_ALLOWLIST:
2021		bt_dev_dbg(hdev, "next state: allowlist");
2022		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2023		break;
2024	case INTERLEAVE_SCAN_NO_FILTER:
2025		bt_dev_dbg(hdev, "next state: no filter");
2026		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2027		break;
2028	case INTERLEAVE_SCAN_NONE:
2029		BT_ERR("unexpected error");
2030		ret = -1;
2031	}
2032
2033	hci_dev_unlock(hdev);
2034
2035	return ret;
2036}
2037
2038static void interleave_scan_work(struct work_struct *work)
2039{
2040	struct hci_dev *hdev = container_of(work, struct hci_dev,
2041					    interleave_scan.work);
2042	u8 status;
2043	unsigned long timeout;
2044
2045	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2046		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2047	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2048		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2049	} else {
2050		bt_dev_err(hdev, "unexpected error");
2051		return;
2052	}
2053
2054	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2055		     HCI_CMD_TIMEOUT, &status);
2056
2057	/* Don't continue interleaving if it was canceled */
2058	if (is_interleave_scanning(hdev))
2059		queue_delayed_work(hdev->req_workqueue,
2060				   &hdev->interleave_scan, timeout);
2061}
2062
2063int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2064			   bool use_rpa, struct adv_info *adv_instance,
2065			   u8 *own_addr_type, bdaddr_t *rand_addr)
2066{
2067	int err;
2068
2069	bacpy(rand_addr, BDADDR_ANY);
2070
2071	/* If privacy is enabled use a resolvable private address. If
2072	 * current RPA has expired then generate a new one.
2073	 */
2074	if (use_rpa) {
2075		/* If the controller supports LL Privacy, use own address
2076		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2077		 */
2078		if (use_ll_privacy(hdev) &&
2079		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2080			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2081		else
2082			*own_addr_type = ADDR_LE_DEV_RANDOM;
2083
2084		if (adv_instance) {
2085			if (adv_rpa_valid(adv_instance))
2086				return 0;
2087		} else {
2088			if (rpa_valid(hdev))
2089				return 0;
2090		}
2091
2092		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2093		if (err < 0) {
2094			bt_dev_err(hdev, "failed to generate new RPA");
2095			return err;
2096		}
2097
2098		bacpy(rand_addr, &hdev->rpa);
2099
2100		return 0;
2101	}
2102
2103	/* In case of required privacy without resolvable private address,
2104	 * use a non-resolvable private address. This is useful for
2105	 * non-connectable advertising.
2106	 */
2107	if (require_privacy) {
2108		bdaddr_t nrpa;
2109
2110		while (true) {
2111			/* The non-resolvable private address is generated
2112			 * from random six bytes with the two most significant
2113			 * bits cleared.
2114			 */
2115			get_random_bytes(&nrpa, 6);
2116			nrpa.b[5] &= 0x3f;
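			/* bdaddr_t is stored little endian, so b[5] is the
			 * most significant address byte; clearing its top two
			 * bits marks the address as non-resolvable private.
			 */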
2117
2118			/* The non-resolvable private address shall not be
2119			 * equal to the public address.
2120			 */
2121			if (bacmp(&hdev->bdaddr, &nrpa))
2122				break;
2123		}
2124
2125		*own_addr_type = ADDR_LE_DEV_RANDOM;
2126		bacpy(rand_addr, &nrpa);
2127
2128		return 0;
2129	}
2130
2131	/* No privacy so use a public address. */
2132	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2133
2134	return 0;
2135}
2136
2137void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2138{
2139	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2140}
2141
2142static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2143{
2144	struct hci_dev *hdev = req->hdev;
2145
2146	/* If we're advertising or initiating an LE connection we can't
2147	 * go ahead and change the random address at this time. This is
2148	 * because the eventual initiator address used for the
2149	 * subsequently created connection will be undefined (some
2150	 * controllers use the new address and others the one we had
2151	 * when the operation started).
2152	 *
2153	 * In this kind of scenario skip the update and let the random
2154	 * address be updated at the next cycle.
2155	 */
2156	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2157	    hci_lookup_le_connect(hdev)) {
2158		bt_dev_dbg(hdev, "Deferring random address update");
2159		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2160		return;
2161	}
2162
2163	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2164}
2165
2166int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2167{
2168	struct hci_cp_le_set_ext_adv_params cp;
2169	struct hci_dev *hdev = req->hdev;
2170	bool connectable;
2171	u32 flags;
2172	bdaddr_t random_addr;
2173	u8 own_addr_type;
2174	int err;
2175	struct adv_info *adv_instance;
2176	bool secondary_adv;
2177
2178	if (instance > 0) {
2179		adv_instance = hci_find_adv_instance(hdev, instance);
2180		if (!adv_instance)
2181			return -EINVAL;
2182	} else {
2183		adv_instance = NULL;
2184	}
2185
2186	flags = get_adv_instance_flags(hdev, instance);
2187
2188	/* If the "connectable" instance flag was not set, then choose between
2189	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2190	 */
2191	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2192		      mgmt_get_connectable(hdev);
2193
2194	if (!is_advertising_allowed(hdev, connectable))
2195		return -EPERM;
2196
2197	/* Set require_privacy to true only when non-connectable
2198	 * advertising is used. In that case it is fine to use a
2199	 * non-resolvable private address.
2200	 */
2201	err = hci_get_random_address(hdev, !connectable,
2202				     adv_use_rpa(hdev, flags), adv_instance,
2203				     &own_addr_type, &random_addr);
2204	if (err < 0)
2205		return err;
2206
2207	memset(&cp, 0, sizeof(cp));
2208
2209	if (adv_instance) {
2210		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2211		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2212		cp.tx_power = adv_instance->tx_power;
2213	} else {
2214		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2215		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2216		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2217	}
2218
2219	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2220
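	/* Summary of the event property selection below:
	 *   connectable                     -> LE_LEGACY_ADV_IND      / LE_EXT_ADV_CONN_IND
	 *   scannable or scan-rsp requested -> LE_LEGACY_ADV_SCAN_IND / LE_EXT_ADV_SCAN_IND
	 *   otherwise                       -> LE_LEGACY_NONCONN_IND  / LE_EXT_ADV_NON_CONN_IND
	 * where the extended variant is chosen when a secondary PHY
	 * (MGMT_ADV_FLAG_SEC_MASK) was requested.
	 */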
2221	if (connectable) {
2222		if (secondary_adv)
2223			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2224		else
2225			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2226	} else if (adv_instance_is_scannable(hdev, instance) ||
2227		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2228		if (secondary_adv)
2229			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2230		else
2231			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2232	} else {
2233		if (secondary_adv)
2234			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2235		else
2236			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2237	}
2238
2239	cp.own_addr_type = own_addr_type;
2240	cp.channel_map = hdev->le_adv_channel_map;
2241	cp.handle = instance;
2242
2243	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2244		cp.primary_phy = HCI_ADV_PHY_1M;
2245		cp.secondary_phy = HCI_ADV_PHY_2M;
2246	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2247		cp.primary_phy = HCI_ADV_PHY_CODED;
2248		cp.secondary_phy = HCI_ADV_PHY_CODED;
2249	} else {
2250		/* In all other cases use 1M */
2251		cp.primary_phy = HCI_ADV_PHY_1M;
2252		cp.secondary_phy = HCI_ADV_PHY_1M;
2253	}
2254
2255	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2256
2257	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2258	    bacmp(&random_addr, BDADDR_ANY)) {
2259		struct hci_cp_le_set_adv_set_rand_addr cp;
2260
2261		/* Check if the random address needs to be updated */
2262		if (adv_instance) {
2263			if (!bacmp(&random_addr, &adv_instance->random_addr))
2264				return 0;
2265		} else {
2266			if (!bacmp(&random_addr, &hdev->random_addr))
2267				return 0;
2268			/* Instance 0x00 doesn't have an adv_info, instead it
2269			 * uses hdev->random_addr to track its address so
2270			 * whenever it needs to be updated this also sets the
2271			 * random address, since hdev->random_addr is shared with
2272			 * the scan state machine.
2273			 */
2274			set_random_addr(req, &random_addr);
2275		}
2276
2277		memset(&cp, 0, sizeof(cp));
2278
2279		cp.handle = instance;
2280		bacpy(&cp.bdaddr, &random_addr);
2281
2282		hci_req_add(req,
2283			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2284			    sizeof(cp), &cp);
2285	}
2286
2287	return 0;
2288}
2289
2290int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2291{
2292	struct hci_dev *hdev = req->hdev;
2293	struct hci_cp_le_set_ext_adv_enable *cp;
2294	struct hci_cp_ext_adv_set *adv_set;
2295	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2296	struct adv_info *adv_instance;
2297
2298	if (instance > 0) {
2299		adv_instance = hci_find_adv_instance(hdev, instance);
2300		if (!adv_instance)
2301			return -EINVAL;
2302	} else {
2303		adv_instance = NULL;
2304	}
2305
2306	cp = (void *) data;
2307	adv_set = (void *) cp->data;
2308
2309	memset(cp, 0, sizeof(*cp));
2310
2311	cp->enable = 0x01;
2312	cp->num_of_sets = 0x01;
2313
2314	memset(adv_set, 0, sizeof(*adv_set));
2315
2316	adv_set->handle = instance;
2317
2318	/* Set duration per instance since controller is responsible for
2319	 * scheduling it.
2320	 */
2321	if (adv_instance && adv_instance->duration) {
2322		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2323
2324		/* Time = N * 10 ms */
2325		adv_set->duration = cpu_to_le16(duration / 10);
2326	}
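	/* Illustrative example: an instance timeout of 5 seconds gives
	 * duration = 5 * MSEC_PER_SEC = 5000, so the controller is asked to
	 * advertise the set for N = 5000 / 10 = 500 slots of 10 ms, i.e. 5 s.
	 */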
2327
2328	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2329		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2330		    data);
2331
2332	return 0;
2333}
2334
2335int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2336{
2337	struct hci_dev *hdev = req->hdev;
2338	struct hci_cp_le_set_ext_adv_enable *cp;
2339	struct hci_cp_ext_adv_set *adv_set;
2340	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2341	u8 req_size;
2342
2343	/* If request specifies an instance that doesn't exist, fail */
2344	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2345		return -EINVAL;
2346
2347	memset(data, 0, sizeof(data));
2348
2349	cp = (void *)data;
2350	adv_set = (void *)cp->data;
2351
2352	/* Instance 0x00 indicates all advertising instances will be disabled */
2353	cp->num_of_sets = !!instance;
2354	cp->enable = 0x00;
2355
2356	adv_set->handle = instance;
2357
2358	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2359	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2360
2361	return 0;
2362}
2363
2364int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2365{
2366	struct hci_dev *hdev = req->hdev;
2367
2368	/* If request specifies an instance that doesn't exist, fail */
2369	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2370		return -EINVAL;
2371
2372	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2373
2374	return 0;
2375}
2376
2377int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2378{
2379	struct hci_dev *hdev = req->hdev;
2380	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2381	int err;
2382
2383	/* If instance isn't pending, the chip knows about it, and it's safe to
2384	 * disable it.
2385	 */
2386	if (adv_instance && !adv_instance->pending)
2387		__hci_req_disable_ext_adv_instance(req, instance);
2388
2389	err = __hci_req_setup_ext_adv_instance(req, instance);
2390	if (err < 0)
2391		return err;
2392
2393	__hci_req_update_scan_rsp_data(req, instance);
2394	__hci_req_enable_ext_advertising(req, instance);
2395
2396	return 0;
2397}
2398
2399int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2400				    bool force)
2401{
2402	struct hci_dev *hdev = req->hdev;
2403	struct adv_info *adv_instance = NULL;
2404	u16 timeout;
2405
2406	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2407	    list_empty(&hdev->adv_instances))
2408		return -EPERM;
2409
2410	if (hdev->adv_instance_timeout)
2411		return -EBUSY;
2412
2413	adv_instance = hci_find_adv_instance(hdev, instance);
2414	if (!adv_instance)
2415		return -ENOENT;
2416
2417	/* A zero timeout means unlimited advertising. As long as there is
2418	 * only one instance, duration should be ignored. We still set a timeout
2419	 * in case further instances are being added later on.
2420	 *
2421	 * If the remaining lifetime of the instance is more than the duration
2422	 * then the timeout corresponds to the duration, otherwise it will be
2423	 * reduced to the remaining instance lifetime.
2424	 */
2425	if (adv_instance->timeout == 0 ||
2426	    adv_instance->duration <= adv_instance->remaining_time)
2427		timeout = adv_instance->duration;
2428	else
2429		timeout = adv_instance->remaining_time;
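	/* Illustrative example: with duration = 10 s and remaining_time = 4 s
	 * the instance is scheduled for timeout = 4 s, after which
	 * remaining_time drops to 0 below and the instance becomes eligible
	 * for removal on the next pass.
	 */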
2430
2431	/* The remaining time is being reduced unless the instance is being
2432	 * advertised without time limit.
2433	 */
2434	if (adv_instance->timeout)
2435		adv_instance->remaining_time =
2436				adv_instance->remaining_time - timeout;
2437
2438	/* Only use work for scheduling instances with legacy advertising */
2439	if (!ext_adv_capable(hdev)) {
2440		hdev->adv_instance_timeout = timeout;
2441		queue_delayed_work(hdev->req_workqueue,
2442			   &hdev->adv_instance_expire,
2443			   msecs_to_jiffies(timeout * 1000));
2444	}
2445
2446	/* If we're just re-scheduling the same instance again then do not
2447	 * execute any HCI commands. This happens when a single instance is
2448	 * being advertised.
2449	 */
2450	if (!force && hdev->cur_adv_instance == instance &&
2451	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2452		return 0;
2453
2454	hdev->cur_adv_instance = instance;
2455	if (ext_adv_capable(hdev)) {
2456		__hci_req_start_ext_adv(req, instance);
2457	} else {
2458		__hci_req_update_adv_data(req, instance);
2459		__hci_req_update_scan_rsp_data(req, instance);
2460		__hci_req_enable_advertising(req);
2461	}
2462
2463	return 0;
2464}
2465
2466/* For a single instance:
2467 * - force == true: The instance will be removed even when its remaining
2468 *   lifetime is not zero.
2469 * - force == false: the instance will be deactivated but kept stored unless
2470 *   the remaining lifetime is zero.
2471 *
2472 * For instance == 0x00:
2473 * - force == true: All instances will be removed regardless of their timeout
2474 *   setting.
2475 * - force == false: Only instances that have a timeout will be removed.
2476 */
2477void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2478				struct hci_request *req, u8 instance,
2479				bool force)
2480{
2481	struct adv_info *adv_instance, *n, *next_instance = NULL;
2482	int err;
2483	u8 rem_inst;
2484
2485	/* Cancel any timeout concerning the removed instance(s). */
2486	if (!instance || hdev->cur_adv_instance == instance)
2487		cancel_adv_timeout(hdev);
2488
2489	/* Get the next instance to advertise BEFORE we remove
2490	 * the current one. This can be the same instance again
2491	 * if there is only one instance.
2492	 */
2493	if (instance && hdev->cur_adv_instance == instance)
2494		next_instance = hci_get_next_instance(hdev, instance);
2495
2496	if (instance == 0x00) {
2497		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2498					 list) {
2499			if (!(force || adv_instance->timeout))
2500				continue;
2501
2502			rem_inst = adv_instance->instance;
2503			err = hci_remove_adv_instance(hdev, rem_inst);
2504			if (!err)
2505				mgmt_advertising_removed(sk, hdev, rem_inst);
2506		}
2507	} else {
2508		adv_instance = hci_find_adv_instance(hdev, instance);
2509
2510		if (force || (adv_instance && adv_instance->timeout &&
2511			      !adv_instance->remaining_time)) {
2512			/* Don't advertise a removed instance. */
2513			if (next_instance &&
2514			    next_instance->instance == instance)
2515				next_instance = NULL;
2516
2517			err = hci_remove_adv_instance(hdev, instance);
2518			if (!err)
2519				mgmt_advertising_removed(sk, hdev, instance);
2520		}
2521	}
2522
2523	if (!req || !hdev_is_powered(hdev) ||
2524	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2525		return;
2526
2527	if (next_instance && !ext_adv_capable(hdev))
2528		__hci_req_schedule_adv_instance(req, next_instance->instance,
2529						false);
2530}
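/* Usage note (illustrative): adv_timeout_expire() above calls this with a
 * specific instance and force == false, so an expired instance is removed
 * only once its remaining_time has been consumed, while passing instance
 * 0x00 together with force == true would drop every stored instance
 * regardless of its timeout.
 */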
2531
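/* Summary of the address selection below: a resolvable private address when
 * use_rpa is set (own address type 0x03 with LL privacy, 0x01 otherwise), a
 * freshly generated non-resolvable private address when only require_privacy
 * is set, the static random address when it is forced, no public address
 * exists, or BR/EDR is disabled with a static address configured, and
 * finally the public address.
 */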
2532int hci_update_random_address(struct hci_request *req, bool require_privacy,
2533			      bool use_rpa, u8 *own_addr_type)
2534{
2535	struct hci_dev *hdev = req->hdev;
2536	int err;
2537
2538	/* If privacy is enabled use a resolvable private address. If
2539	 * current RPA has expired or something other than the current
2540	 * RPA is in use, then generate a new one.
2541	 */
2542	if (use_rpa) {
2543		/* If the controller supports LL Privacy, use own address
2544		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2545		 */
2546		if (use_ll_privacy(hdev) &&
2547		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2548			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2549		else
2550			*own_addr_type = ADDR_LE_DEV_RANDOM;
2551
2552		if (rpa_valid(hdev))
2553			return 0;
2554
2555		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2556		if (err < 0) {
2557			bt_dev_err(hdev, "failed to generate new RPA");
2558			return err;
2559		}
2560
2561		set_random_addr(req, &hdev->rpa);
2562
2563		return 0;
2564	}
2565
2566	/* In case of required privacy without resolvable private address,
2567	 * use a non-resolvable private address. This is useful for active
2568	 * scanning and non-connectable advertising.
2569	 */
2570	if (require_privacy) {
2571		bdaddr_t nrpa;
2572
2573		while (true) {
2574			/* The non-resolvable private address is generated
2575			 * from random six bytes with the two most significant
2576			 * bits cleared.
2577			 */
2578			get_random_bytes(&nrpa, 6);
2579			nrpa.b[5] &= 0x3f;
2580
2581			/* The non-resolvable private address shall not be
2582			 * equal to the public address.
2583			 */
2584			if (bacmp(&hdev->bdaddr, &nrpa))
2585				break;
2586		}
2587
2588		*own_addr_type = ADDR_LE_DEV_RANDOM;
2589		set_random_addr(req, &nrpa);
2590		return 0;
2591	}
2592
2593	/* If forcing static address is in use or there is no public
2594	 * address use the static address as random address (but skip
2595	 * the HCI command if the current random address is already the
2596	 * static one).
2597	 *
2598	 * In case BR/EDR has been disabled on a dual-mode controller
2599	 * and a static address has been configured, then use that
2600	 * address instead of the public BR/EDR address.
2601	 */
2602	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2603	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2604	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2605	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2606		*own_addr_type = ADDR_LE_DEV_RANDOM;
2607		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2608			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2609				    &hdev->static_addr);
2610		return 0;
2611	}
2612
2613	/* Neither privacy nor static address is being used so use a
2614	 * public address.
2615	 */
2616	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2617
2618	return 0;
2619}
2620
2621static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2622{
2623	struct bdaddr_list *b;
2624
2625	list_for_each_entry(b, &hdev->accept_list, list) {
2626		struct hci_conn *conn;
2627
2628		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2629		if (!conn)
2630			return true;
2631
2632		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2633			return true;
2634	}
2635
2636	return false;
2637}
2638
2639void __hci_req_update_scan(struct hci_request *req)
2640{
2641	struct hci_dev *hdev = req->hdev;
2642	u8 scan;
2643
2644	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2645		return;
2646
2647	if (!hdev_is_powered(hdev))
2648		return;
2649
2650	if (mgmt_powering_down(hdev))
2651		return;
2652
2653	if (hdev->scanning_paused)
2654		return;
2655
2656	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2657	    disconnected_accept_list_entries(hdev))
2658		scan = SCAN_PAGE;
2659	else
2660		scan = SCAN_DISABLED;
2661
2662	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2663		scan |= SCAN_INQUIRY;
2664
2665	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2666	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2667		return;
2668
2669	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2670}
2671
2672static int update_scan(struct hci_request *req, unsigned long opt)
2673{
2674	hci_dev_lock(req->hdev);
2675	__hci_req_update_scan(req);
2676	hci_dev_unlock(req->hdev);
2677	return 0;
2678}
2679
2680static void scan_update_work(struct work_struct *work)
2681{
2682	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2683
2684	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2685}
2686
2687static int connectable_update(struct hci_request *req, unsigned long opt)
2688{
2689	struct hci_dev *hdev = req->hdev;
2690
2691	hci_dev_lock(hdev);
2692
2693	__hci_req_update_scan(req);
2694
2695	/* If BR/EDR is not enabled and we disable advertising as a
2696	 * by-product of disabling connectable, we need to update the
2697	 * advertising flags.
2698	 */
2699	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2700		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2701
2702	/* Update the advertising parameters if necessary */
2703	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2704	    !list_empty(&hdev->adv_instances)) {
2705		if (ext_adv_capable(hdev))
2706			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2707		else
2708			__hci_req_enable_advertising(req);
2709	}
2710
2711	__hci_update_background_scan(req);
2712
2713	hci_dev_unlock(hdev);
2714
2715	return 0;
2716}
2717
2718static void connectable_update_work(struct work_struct *work)
2719{
2720	struct hci_dev *hdev = container_of(work, struct hci_dev,
2721					    connectable_update);
2722	u8 status;
2723
2724	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2725	mgmt_set_connectable_complete(hdev, status);
2726}
2727
2728static u8 get_service_classes(struct hci_dev *hdev)
2729{
2730	struct bt_uuid *uuid;
2731	u8 val = 0;
2732
2733	list_for_each_entry(uuid, &hdev->uuids, list)
2734		val |= uuid->svc_hint;
2735
2736	return val;
2737}
2738
2739void __hci_req_update_class(struct hci_request *req)
2740{
2741	struct hci_dev *hdev = req->hdev;
2742	u8 cod[3];
2743
2744	bt_dev_dbg(hdev, "");
2745
2746	if (!hdev_is_powered(hdev))
2747		return;
2748
2749	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2750		return;
2751
2752	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2753		return;
2754
2755	cod[0] = hdev->minor_class;
2756	cod[1] = hdev->major_class;
2757	cod[2] = get_service_classes(hdev);
2758
2759	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2760		cod[1] |= 0x20;
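	/* 0x20 in cod[1] is bit 13 of the 24-bit Class of Device, the
	 * "Limited Discoverable Mode" service class bit.
	 */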
2761
2762	if (memcmp(cod, hdev->dev_class, 3) == 0)
2763		return;
2764
2765	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2766}
2767
2768static void write_iac(struct hci_request *req)
2769{
2770	struct hci_dev *hdev = req->hdev;
2771	struct hci_cp_write_current_iac_lap cp;
2772
2773	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2774		return;
2775
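	/* The inquiry access codes below are 24-bit LAPs written LSB first:
	 * 0x9E8B00 is the Limited Inquiry Access Code (LIAC) and 0x9E8B33 the
	 * General Inquiry Access Code (GIAC).
	 */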
2776	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2777		/* Limited discoverable mode */
2778		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2779		cp.iac_lap[0] = 0x00;	/* LIAC */
2780		cp.iac_lap[1] = 0x8b;
2781		cp.iac_lap[2] = 0x9e;
2782		cp.iac_lap[3] = 0x33;	/* GIAC */
2783		cp.iac_lap[4] = 0x8b;
2784		cp.iac_lap[5] = 0x9e;
2785	} else {
2786		/* General discoverable mode */
2787		cp.num_iac = 1;
2788		cp.iac_lap[0] = 0x33;	/* GIAC */
2789		cp.iac_lap[1] = 0x8b;
2790		cp.iac_lap[2] = 0x9e;
2791	}
2792
2793	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2794		    (cp.num_iac * 3) + 1, &cp);
2795}
2796
2797static int discoverable_update(struct hci_request *req, unsigned long opt)
2798{
2799	struct hci_dev *hdev = req->hdev;
2800
2801	hci_dev_lock(hdev);
2802
2803	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2804		write_iac(req);
2805		__hci_req_update_scan(req);
2806		__hci_req_update_class(req);
2807	}
2808
2809	/* Advertising instances don't use the global discoverable setting, so
2810	 * only update AD if advertising was enabled using Set Advertising.
2811	 */
2812	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2813		__hci_req_update_adv_data(req, 0x00);
2814
2815		/* Discoverable mode affects the local advertising
2816		 * address in limited privacy mode.
2817		 */
2818		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2819			if (ext_adv_capable(hdev))
2820				__hci_req_start_ext_adv(req, 0x00);
2821			else
2822				__hci_req_enable_advertising(req);
2823		}
2824	}
2825
2826	hci_dev_unlock(hdev);
2827
2828	return 0;
2829}
2830
2831static void discoverable_update_work(struct work_struct *work)
2832{
2833	struct hci_dev *hdev = container_of(work, struct hci_dev,
2834					    discoverable_update);
2835	u8 status;
2836
2837	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2838	mgmt_set_discoverable_complete(hdev, status);
2839}
2840
2841void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2842		      u8 reason)
2843{
2844	switch (conn->state) {
2845	case BT_CONNECTED:
2846	case BT_CONFIG:
2847		if (conn->type == AMP_LINK) {
2848			struct hci_cp_disconn_phy_link cp;
2849
2850			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2851			cp.reason = reason;
2852			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2853				    &cp);
2854		} else {
2855			struct hci_cp_disconnect dc;
2856
2857			dc.handle = cpu_to_le16(conn->handle);
2858			dc.reason = reason;
2859			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2860		}
2861
2862		conn->state = BT_DISCONN;
2863
2864		break;
2865	case BT_CONNECT:
2866		if (conn->type == LE_LINK) {
2867			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2868				break;
2869			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2870				    0, NULL);
2871		} else if (conn->type == ACL_LINK) {
2872			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2873				break;
2874			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2875				    6, &conn->dst);
2876		}
2877		break;
2878	case BT_CONNECT2:
2879		if (conn->type == ACL_LINK) {
2880			struct hci_cp_reject_conn_req rej;
2881
2882			bacpy(&rej.bdaddr, &conn->dst);
2883			rej.reason = reason;
2884
2885			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2886				    sizeof(rej), &rej);
2887		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2888			struct hci_cp_reject_sync_conn_req rej;
2889
2890			bacpy(&rej.bdaddr, &conn->dst);
2891
2892			/* SCO rejection has its own limited set of
2893			 * allowed error values (0x0D-0x0F) which isn't
2894			 * compatible with most values passed to this
2895			 * function. To be safe hard-code one of the
2896			 * values that's suitable for SCO.
2897			 */
2898			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2899
2900			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2901				    sizeof(rej), &rej);
2902		}
2903		break;
2904	default:
2905		conn->state = BT_CLOSED;
2906		break;
2907	}
2908}
2909
2910static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2911{
2912	if (status)
2913		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2914}
2915
2916int hci_abort_conn(struct hci_conn *conn, u8 reason)
2917{
2918	struct hci_request req;
2919	int err;
2920
2921	hci_req_init(&req, conn->hdev);
2922
2923	__hci_abort_conn(&req, conn, reason);
2924
2925	err = hci_req_run(&req, abort_conn_complete);
2926	if (err && err != -ENODATA) {
2927		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2928		return err;
2929	}
2930
2931	return 0;
2932}
2933
2934static int update_bg_scan(struct hci_request *req, unsigned long opt)
2935{
2936	hci_dev_lock(req->hdev);
2937	__hci_update_background_scan(req);
2938	hci_dev_unlock(req->hdev);
2939	return 0;
2940}
2941
2942static void bg_scan_update(struct work_struct *work)
2943{
2944	struct hci_dev *hdev = container_of(work, struct hci_dev,
2945					    bg_scan_update);
2946	struct hci_conn *conn;
2947	u8 status;
2948	int err;
2949
2950	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2951	if (!err)
2952		return;
2953
2954	hci_dev_lock(hdev);
2955
2956	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2957	if (conn)
2958		hci_le_conn_failed(conn, status);
2959
2960	hci_dev_unlock(hdev);
2961}
2962
2963static int le_scan_disable(struct hci_request *req, unsigned long opt)
2964{
2965	hci_req_add_le_scan_disable(req, false);
2966	return 0;
2967}
2968
2969static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2970{
2971	u8 length = opt;
2972	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2973	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2974	struct hci_cp_inquiry cp;
2975
2976	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2977		return 0;
2978
2979	bt_dev_dbg(req->hdev, "");
2980
2981	hci_dev_lock(req->hdev);
2982	hci_inquiry_cache_flush(req->hdev);
2983	hci_dev_unlock(req->hdev);
2984
2985	memset(&cp, 0, sizeof(cp));
2986
2987	if (req->hdev->discovery.limited)
2988		memcpy(&cp.lap, liac, sizeof(cp.lap));
2989	else
2990		memcpy(&cp.lap, giac, sizeof(cp.lap));
2991
2992	cp.length = length;
2993
2994	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2995
2996	return 0;
2997}
2998
2999static void le_scan_disable_work(struct work_struct *work)
3000{
3001	struct hci_dev *hdev = container_of(work, struct hci_dev,
3002					    le_scan_disable.work);
3003	u8 status;
3004
3005	bt_dev_dbg(hdev, "");
3006
3007	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3008		return;
3009
3010	cancel_delayed_work(&hdev->le_scan_restart);
3011
3012	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3013	if (status) {
3014		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3015			   status);
3016		return;
3017	}
3018
3019	hdev->discovery.scan_start = 0;
3020
3021	/* If we were running LE only scan, change discovery state. If
3022	 * we were running both LE and BR/EDR inquiry simultaneously,
3023	 * and BR/EDR inquiry is already finished, stop discovery,
3024	 * otherwise BR/EDR inquiry will stop discovery when finished.
3025	 * If we are going to resolve a remote device name, do not
3026	 * change the discovery state.
3027	 */
3028
3029	if (hdev->discovery.type == DISCOV_TYPE_LE)
3030		goto discov_stopped;
3031
3032	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3033		return;
3034
3035	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3036		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3037		    hdev->discovery.state != DISCOVERY_RESOLVING)
3038			goto discov_stopped;
3039
3040		return;
3041	}
3042
3043	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3044		     HCI_CMD_TIMEOUT, &status);
3045	if (status) {
3046		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3047		goto discov_stopped;
3048	}
3049
3050	return;
3051
3052discov_stopped:
3053	hci_dev_lock(hdev);
3054	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3055	hci_dev_unlock(hdev);
3056}
3057
3058static int le_scan_restart(struct hci_request *req, unsigned long opt)
3059{
3060	struct hci_dev *hdev = req->hdev;
3061
3062	/* If controller is not scanning we are done. */
3063	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3064		return 0;
3065
3066	if (hdev->scanning_paused) {
3067		bt_dev_dbg(hdev, "Scanning is paused for suspend");
3068		return 0;
3069	}
3070
3071	hci_req_add_le_scan_disable(req, false);
3072
3073	if (use_ext_scan(hdev)) {
3074		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3075
3076		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3077		ext_enable_cp.enable = LE_SCAN_ENABLE;
3078		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3079
3080		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3081			    sizeof(ext_enable_cp), &ext_enable_cp);
3082	} else {
3083		struct hci_cp_le_set_scan_enable cp;
3084
3085		memset(&cp, 0, sizeof(cp));
3086		cp.enable = LE_SCAN_ENABLE;
3087		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3088		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3089	}
3090
3091	return 0;
3092}
3093
3094static void le_scan_restart_work(struct work_struct *work)
3095{
3096	struct hci_dev *hdev = container_of(work, struct hci_dev,
3097					    le_scan_restart.work);
3098	unsigned long timeout, duration, scan_start, now;
3099	u8 status;
3100
3101	bt_dev_dbg(hdev, "");
3102
3103	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3104	if (status) {
3105		bt_dev_err(hdev, "failed to restart LE scan: status %d",
3106			   status);
3107		return;
3108	}
3109
3110	hci_dev_lock(hdev);
3111
3112	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3113	    !hdev->discovery.scan_start)
3114		goto unlock;
3115
3116	/* When the scan was started, hdev->le_scan_disable was queued to run
3117	 * 'duration' after scan_start. During the scan restart that work has
3118	 * been canceled, so queue it again with the proper remaining timeout
3119	 * to make sure that the scan does not run indefinitely.
3120	 */
3121	duration = hdev->discovery.scan_duration;
3122	scan_start = hdev->discovery.scan_start;
3123	now = jiffies;
3124	if (now - scan_start <= duration) {
3125		int elapsed;
3126
3127		if (now >= scan_start)
3128			elapsed = now - scan_start;
3129		else
3130			elapsed = ULONG_MAX - scan_start + now;
3131
3132		timeout = duration - elapsed;
3133	} else {
3134		timeout = 0;
3135	}
3136
3137	queue_delayed_work(hdev->req_workqueue,
3138			   &hdev->le_scan_disable, timeout);
3139
3140unlock:
3141	hci_dev_unlock(hdev);
3142}
3143
3144static int active_scan(struct hci_request *req, unsigned long opt)
3145{
3146	uint16_t interval = opt;
3147	struct hci_dev *hdev = req->hdev;
3148	u8 own_addr_type;
3149	/* Accept list is not used for discovery */
3150	u8 filter_policy = 0x00;
3151	/* Default is to enable duplicates filter */
3152	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3153	/* Discovery doesn't require controller address resolution */
3154	bool addr_resolv = false;
3155	int err;
3156
3157	bt_dev_dbg(hdev, "");
3158
3159	/* If controller is scanning, it means the background scanning is
3160	 * running. Thus, we should temporarily stop it in order to set the
3161	 * discovery scanning parameters.
3162	 */
3163	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3164		hci_req_add_le_scan_disable(req, false);
3165		cancel_interleave_scan(hdev);
3166	}
3167
3168	/* All active scans will be done with either a resolvable private
3169	 * address (when privacy feature has been enabled) or non-resolvable
3170	 * private address.
3171	 */
3172	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3173					&own_addr_type);
3174	if (err < 0)
3175		own_addr_type = ADDR_LE_DEV_PUBLIC;
3176
3177	if (hci_is_adv_monitoring(hdev)) {
3178		/* Duplicate filter should be disabled when some advertisement
3179		 * monitor is activated, otherwise AdvMon can only receive one
3180		 * advertisement per peer(*) during active scanning, and might
3181		 * wrongly report such peers as lost.
3182		 *
3183		 * Note that different controllers have different meanings of
3184		 * |duplicate|. Some of them consider packets with the same
3185		 * address as duplicate, and others consider packets with the
3186		 * same address and the same RSSI as duplicate. Although in the
3187		 * latter case we don't need to disable the duplicate filter,
3188		 * active scanning typically runs only for a short period of
3189		 * time, so the power impact should be negligible.
3190		 */
3191		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3192	}
3193
3194	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3195			   hdev->le_scan_window_discovery, own_addr_type,
3196			   filter_policy, filter_dup, addr_resolv);
3197	return 0;
3198}
3199
3200static int interleaved_discov(struct hci_request *req, unsigned long opt)
3201{
3202	int err;
3203
3204	bt_dev_dbg(req->hdev, "");
3205
3206	err = active_scan(req, opt);
3207	if (err)
3208		return err;
3209
3210	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3211}
3212
3213static void start_discovery(struct hci_dev *hdev, u8 *status)
3214{
3215	unsigned long timeout;
3216
3217	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3218
3219	switch (hdev->discovery.type) {
3220	case DISCOV_TYPE_BREDR:
3221		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3222			hci_req_sync(hdev, bredr_inquiry,
3223				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3224				     status);
3225		return;
3226	case DISCOV_TYPE_INTERLEAVED:
3227		/* When running simultaneous discovery, the LE scanning time
3228		 * should occupy the whole discovery time since BR/EDR inquiry
3229		 * and LE scanning are scheduled by the controller.
3230		 *
3231		 * For interleaving discovery in comparison, BR/EDR inquiry
3232		 * and LE scanning are done sequentially with separate
3233		 * timeouts.
3234		 */
3235		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3236			     &hdev->quirks)) {
3237			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3238			/* During simultaneous discovery, we double LE scan
3239			 * interval. We must leave some time for the controller
3240			 * to do BR/EDR inquiry.
3241			 */
3242			hci_req_sync(hdev, interleaved_discov,
3243				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3244				     status);
3245			break;
3246		}
3247
3248		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3249		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3250			     HCI_CMD_TIMEOUT, status);
3251		break;
3252	case DISCOV_TYPE_LE:
3253		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3254		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3255			     HCI_CMD_TIMEOUT, status);
3256		break;
3257	default:
3258		*status = HCI_ERROR_UNSPECIFIED;
3259		return;
3260	}
3261
3262	if (*status)
3263		return;
3264
3265	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3266
3267	/* When service discovery is used and the controller has a
3268	 * strict duplicate filter, it is important to remember the
3269	 * start and duration of the scan. This is required for
3270	 * restarting scanning during the discovery phase.
3271	 */
3272	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3273		     hdev->discovery.result_filtering) {
3274		hdev->discovery.scan_start = jiffies;
3275		hdev->discovery.scan_duration = timeout;
3276	}
3277
3278	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3279			   timeout);
3280}
3281
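/* Queue the HCI commands needed to stop an ongoing discovery: inquiry
 * cancel, LE scan disable and, if a remote name resolution is in
 * progress, a remote name request cancel. Returns true when there is
 * discovery activity to wind down, so the caller knows whether the
 * resulting request needs to be sent.
 */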
3282bool hci_req_stop_discovery(struct hci_request *req)
3283{
3284	struct hci_dev *hdev = req->hdev;
3285	struct discovery_state *d = &hdev->discovery;
3286	struct hci_cp_remote_name_req_cancel cp;
3287	struct inquiry_entry *e;
3288	bool ret = false;
3289
3290	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3291
3292	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3293		if (test_bit(HCI_INQUIRY, &hdev->flags))
3294			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3295
3296		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3297			cancel_delayed_work(&hdev->le_scan_disable);
3298			cancel_delayed_work(&hdev->le_scan_restart);
3299			hci_req_add_le_scan_disable(req, false);
3300		}
3301
3302		ret = true;
3303	} else {
3304		/* Passive scanning */
3305		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3306			hci_req_add_le_scan_disable(req, false);
3307			ret = true;
3308		}
3309	}
3310
3311	/* No further actions needed for LE-only discovery */
3312	if (d->type == DISCOV_TYPE_LE)
3313		return ret;
3314
3315	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3316		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3317						     NAME_PENDING);
3318		if (!e)
3319			return ret;
3320
3321		bacpy(&cp.bdaddr, &e->data.bdaddr);
3322		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3323			    &cp);
3324		ret = true;
3325	}
3326
3327	return ret;
3328}
3329
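/* hci_req_sync callback that wraps hci_req_stop_discovery() with the
 * hdev lock held.
 */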
3330static int stop_discovery(struct hci_request *req, unsigned long opt)
3331{
3332	hci_dev_lock(req->hdev);
3333	hci_req_stop_discovery(req);
3334	hci_dev_unlock(req->hdev);
3335
3336	return 0;
3337}
3338
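/* Work callback driving the discovery state machine: start discovery
 * when STARTING, stop it when STOPPING, and report the outcome back to
 * the management interface.
 */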
3339static void discov_update(struct work_struct *work)
3340{
3341	struct hci_dev *hdev = container_of(work, struct hci_dev,
3342					    discov_update);
3343	u8 status = 0;
3344
3345	switch (hdev->discovery.state) {
3346	case DISCOVERY_STARTING:
3347		start_discovery(hdev, &status);
3348		mgmt_start_discovery_complete(hdev, status);
3349		if (status)
3350			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3351		else
3352			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3353		break;
3354	case DISCOVERY_STOPPING:
3355		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3356		mgmt_stop_discovery_complete(hdev, status);
3357		if (!status)
3358			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3359		break;
3360	case DISCOVERY_STOPPED:
3361	default:
3362		return;
3363	}
3364}
3365
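/* Delayed work triggered when the discoverable timeout expires: clear
 * the discoverable flags and update the controller accordingly.
 */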
3366static void discov_off(struct work_struct *work)
3367{
3368	struct hci_dev *hdev = container_of(work, struct hci_dev,
3369					    discov_off.work);
3370
3371	bt_dev_dbg(hdev, "");
3372
3373	hci_dev_lock(hdev);
3374
3375	/* When discoverable timeout triggers, then just make sure
3376	 * the limited discoverable flag is cleared. Even in the case
3377	 * of a timeout triggered from general discoverable, it is
3378	 * safe to unconditionally clear the flag.
3379	 */
3380	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3381	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3382	hdev->discov_timeout = 0;
3383
3384	hci_dev_unlock(hdev);
3385
3386	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3387	mgmt_new_settings(hdev);
3388}
3389
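/* Bring the controller in line with the stored host state after power
 * on: SSP/SC mode, LE host support, advertising data and instances,
 * link security, fast connectable and scan mode, class, name and EIR
 * data.
 */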
3390static int powered_update_hci(struct hci_request *req, unsigned long opt)
3391{
3392	struct hci_dev *hdev = req->hdev;
3393	u8 link_sec;
3394
3395	hci_dev_lock(hdev);
3396
3397	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3398	    !lmp_host_ssp_capable(hdev)) {
3399		u8 mode = 0x01;
3400
3401		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3402
3403		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3404			u8 support = 0x01;
3405
3406			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3407				    sizeof(support), &support);
3408		}
3409	}
3410
3411	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3412	    lmp_bredr_capable(hdev)) {
3413		struct hci_cp_write_le_host_supported cp;
3414
3415		cp.le = 0x01;
3416		cp.simul = 0x00;
3417
3418		/* Check first if we already have the right
3419		 * host state (host features set)
3420		 */
3421		if (cp.le != lmp_host_le_capable(hdev) ||
3422		    cp.simul != lmp_host_le_br_capable(hdev))
3423			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3424				    sizeof(cp), &cp);
3425	}
3426
3427	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3428		/* Make sure the controller has a good default for
3429		 * advertising data. This also applies to the case
3430		 * where BR/EDR was toggled during the AUTO_OFF phase.
3431		 */
3432		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3433		    list_empty(&hdev->adv_instances)) {
3434			int err;
3435
3436			if (ext_adv_capable(hdev)) {
3437				err = __hci_req_setup_ext_adv_instance(req,
3438								       0x00);
3439				if (!err)
3440					__hci_req_update_scan_rsp_data(req,
3441								       0x00);
3442			} else {
3443				err = 0;
3444				__hci_req_update_adv_data(req, 0x00);
3445				__hci_req_update_scan_rsp_data(req, 0x00);
3446			}
3447
3448			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3449				if (!ext_adv_capable(hdev))
3450					__hci_req_enable_advertising(req);
3451				else if (!err)
3452					__hci_req_enable_ext_advertising(req,
3453									 0x00);
3454			}
3455		} else if (!list_empty(&hdev->adv_instances)) {
3456			struct adv_info *adv_instance;
3457
3458			adv_instance = list_first_entry(&hdev->adv_instances,
3459							struct adv_info, list);
3460			__hci_req_schedule_adv_instance(req,
3461							adv_instance->instance,
3462							true);
3463		}
3464	}
3465
3466	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3467	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3468		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3469			    sizeof(link_sec), &link_sec);
3470
3471	if (lmp_bredr_capable(hdev)) {
3472		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3473			__hci_req_write_fast_connectable(req, true);
3474		else
3475			__hci_req_write_fast_connectable(req, false);
3476		__hci_req_update_scan(req);
3477		__hci_req_update_class(req);
3478		__hci_req_update_name(req);
3479		__hci_req_update_eir(req);
3480	}
3481
3482	hci_dev_unlock(hdev);
3483	return 0;
3484}
3485
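/* Complete powering on the controller: register the SMP channels and
 * run powered_update_hci() to synchronize the controller with the
 * stored host settings.
 */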
3486int __hci_req_hci_power_on(struct hci_dev *hdev)
3487{
3488	/* Register the available SMP channels (BR/EDR and LE) only when
3489	 * successfully powering on the controller. This late
3490	 * registration is required so that LE SMP can clearly decide if
3491	 * the public address or static address is used.
3492	 */
3493	smp_register(hdev);
3494
3495	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3496			      NULL);
3497}
3498
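/* Initialize the work items and delayed work used by the request layer
 * for discovery, background and interleaved scanning, connectable and
 * discoverable updates, and advertising instance expiry.
 */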
3499void hci_request_setup(struct hci_dev *hdev)
3500{
3501	INIT_WORK(&hdev->discov_update, discov_update);
3502	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3503	INIT_WORK(&hdev->scan_update, scan_update_work);
3504	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3505	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3506	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3507	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3508	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3509	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3510	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3511}
3512
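/* Cancel a pending synchronous request (with ENODEV) and all work
 * scheduled by the request layer, including the advertising instance
 * timeout and any interleaved scan.
 */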
3513void hci_request_cancel_all(struct hci_dev *hdev)
3514{
3515	hci_req_sync_cancel(hdev, ENODEV);
3516
3517	cancel_work_sync(&hdev->discov_update);
3518	cancel_work_sync(&hdev->bg_scan_update);
3519	cancel_work_sync(&hdev->scan_update);
3520	cancel_work_sync(&hdev->connectable_update);
3521	cancel_work_sync(&hdev->discoverable_update);
3522	cancel_delayed_work_sync(&hdev->discov_off);
3523	cancel_delayed_work_sync(&hdev->le_scan_disable);
3524	cancel_delayed_work_sync(&hdev->le_scan_restart);
3525
3526	if (hdev->adv_instance_timeout) {
3527		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3528		hdev->adv_instance_timeout = 0;
3529	}
3530
3531	cancel_interleave_scan(hdev);
3532}