   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2014 Intel Corporation
   5
   6   This program is free software; you can redistribute it and/or modify
   7   it under the terms of the GNU General Public License version 2 as
   8   published by the Free Software Foundation;
   9
  10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  11   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  12   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  13   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  14   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  15   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  16   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  17   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  18
  19   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  20   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  21   SOFTWARE IS DISCLAIMED.
  22*/
  23
  24#include <linux/sched/signal.h>
  25
  26#include <net/bluetooth/bluetooth.h>
  27#include <net/bluetooth/hci_core.h>
  28#include <net/bluetooth/mgmt.h>
  29
  30#include "smp.h"
  31#include "hci_request.h"
  32
  33#define HCI_REQ_DONE	  0
  34#define HCI_REQ_PEND	  1
  35#define HCI_REQ_CANCELED  2
  36
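     /* A hci_request collects HCI commands in a local queue before they
      * are submitted to the controller as one unit.  The usual pattern
      * (see for example hci_req_update_adv_data() below) is:
      *
      *	hci_req_init(&req, hdev);
      *	hci_req_add(&req, opcode, plen, param);
      *	hci_req_run(&req, complete_callback);
      */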
  37void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
  38{
  39	skb_queue_head_init(&req->cmd_q);
  40	req->hdev = hdev;
  41	req->err = 0;
  42}
  43
  44void hci_req_purge(struct hci_request *req)
  45{
  46	skb_queue_purge(&req->cmd_q);
  47}
  48
  49bool hci_req_status_pend(struct hci_dev *hdev)
  50{
  51	return hdev->req_status == HCI_REQ_PEND;
  52}
  53
  54static int req_run(struct hci_request *req, hci_req_complete_t complete,
  55		   hci_req_complete_skb_t complete_skb)
  56{
  57	struct hci_dev *hdev = req->hdev;
  58	struct sk_buff *skb;
  59	unsigned long flags;
  60
  61	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
  62
  63	/* If an error occurred during request building, remove all HCI
  64	 * commands queued on the HCI request queue.
  65	 */
  66	if (req->err) {
  67		skb_queue_purge(&req->cmd_q);
  68		return req->err;
  69	}
  70
  71	/* Do not allow empty requests */
  72	if (skb_queue_empty(&req->cmd_q))
  73		return -ENODATA;
  74
  75	skb = skb_peek_tail(&req->cmd_q);
  76	if (complete) {
  77		bt_cb(skb)->hci.req_complete = complete;
  78	} else if (complete_skb) {
  79		bt_cb(skb)->hci.req_complete_skb = complete_skb;
  80		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
  81	}
  82
  83	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
  84	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
  85	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
  86
  87	queue_work(hdev->workqueue, &hdev->cmd_work);
  88
  89	return 0;
  90}
  91
  92int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
  93{
  94	return req_run(req, complete, NULL);
  95}
  96
  97int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
  98{
  99	return req_run(req, NULL, complete);
 100}
 101
 102static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 103				  struct sk_buff *skb)
 104{
 105	BT_DBG("%s result 0x%2.2x", hdev->name, result);
 106
 107	if (hdev->req_status == HCI_REQ_PEND) {
 108		hdev->req_result = result;
 109		hdev->req_status = HCI_REQ_DONE;
 110		if (skb)
 111			hdev->req_skb = skb_get(skb);
 112		wake_up_interruptible(&hdev->req_wait_q);
 113	}
 114}
 115
 116void hci_req_sync_cancel(struct hci_dev *hdev, int err)
 117{
 118	BT_DBG("%s err 0x%2.2x", hdev->name, err);
 119
 120	if (hdev->req_status == HCI_REQ_PEND) {
 121		hdev->req_result = err;
 122		hdev->req_status = HCI_REQ_CANCELED;
 123		wake_up_interruptible(&hdev->req_wait_q);
 124	}
 125}
 126
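     /* Send a single HCI command and wait, for up to 'timeout' jiffies, for
      * its completion (or for the specific 'event' when one is given).
      * Returns the response skb on success or an ERR_PTR() on failure.
      */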
 127struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
 128				  const void *param, u8 event, u32 timeout)
 129{
 130	struct hci_request req;
 131	struct sk_buff *skb;
 132	int err = 0;
 133
 134	BT_DBG("%s", hdev->name);
 135
 136	hci_req_init(&req, hdev);
 137
 138	hci_req_add_ev(&req, opcode, plen, param, event);
 139
 140	hdev->req_status = HCI_REQ_PEND;
 141
 142	err = hci_req_run_skb(&req, hci_req_sync_complete);
 143	if (err < 0)
 144		return ERR_PTR(err);
 145
 146	err = wait_event_interruptible_timeout(hdev->req_wait_q,
 147			hdev->req_status != HCI_REQ_PEND, timeout);
 148
 149	if (err == -ERESTARTSYS)
 150		return ERR_PTR(-EINTR);
 151
 152	switch (hdev->req_status) {
 153	case HCI_REQ_DONE:
 154		err = -bt_to_errno(hdev->req_result);
 155		break;
 156
 157	case HCI_REQ_CANCELED:
 158		err = -hdev->req_result;
 159		break;
 160
 161	default:
 162		err = -ETIMEDOUT;
 163		break;
 164	}
 165
 166	hdev->req_status = hdev->req_result = 0;
 167	skb = hdev->req_skb;
 168	hdev->req_skb = NULL;
 169
 170	BT_DBG("%s end: err %d", hdev->name, err);
 171
 172	if (err < 0) {
 173		kfree_skb(skb);
 174		return ERR_PTR(err);
 175	}
 176
 177	if (!skb)
 178		return ERR_PTR(-ENODATA);
 179
 180	return skb;
 181}
 182EXPORT_SYMBOL(__hci_cmd_sync_ev);
 183
 184struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
 185			       const void *param, u32 timeout)
 186{
 187	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
 188}
 189EXPORT_SYMBOL(__hci_cmd_sync);
 190
 191/* Execute request and wait for completion. */
 192int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
 193						     unsigned long opt),
 194		   unsigned long opt, u32 timeout, u8 *hci_status)
 195{
 196	struct hci_request req;
 197	int err = 0;
 198
 199	BT_DBG("%s start", hdev->name);
 200
 201	hci_req_init(&req, hdev);
 202
 203	hdev->req_status = HCI_REQ_PEND;
 204
 205	err = func(&req, opt);
 206	if (err) {
 207		if (hci_status)
 208			*hci_status = HCI_ERROR_UNSPECIFIED;
 209		return err;
 210	}
 211
 212	err = hci_req_run_skb(&req, hci_req_sync_complete);
 213	if (err < 0) {
 214		hdev->req_status = 0;
 215
 216		/* ENODATA means the HCI request command queue is empty.
 217		 * This can happen when a request with conditionals doesn't
 218		 * trigger any commands to be sent. This is normal behavior
 219		 * and should not trigger an error return.
 220		 */
 221		if (err == -ENODATA) {
 222			if (hci_status)
 223				*hci_status = 0;
 224			return 0;
 225		}
 226
 227		if (hci_status)
 228			*hci_status = HCI_ERROR_UNSPECIFIED;
 229
 230		return err;
 231	}
 232
 233	err = wait_event_interruptible_timeout(hdev->req_wait_q,
 234			hdev->req_status != HCI_REQ_PEND, timeout);
 235
 236	if (err == -ERESTARTSYS)
 237		return -EINTR;
 238
 239	switch (hdev->req_status) {
 240	case HCI_REQ_DONE:
 241		err = -bt_to_errno(hdev->req_result);
 242		if (hci_status)
 243			*hci_status = hdev->req_result;
 244		break;
 245
 246	case HCI_REQ_CANCELED:
 247		err = -hdev->req_result;
 248		if (hci_status)
 249			*hci_status = HCI_ERROR_UNSPECIFIED;
 250		break;
 251
 252	default:
 253		err = -ETIMEDOUT;
 254		if (hci_status)
 255			*hci_status = HCI_ERROR_UNSPECIFIED;
 256		break;
 257	}
 258
 259	kfree_skb(hdev->req_skb);
 260	hdev->req_skb = NULL;
 261	hdev->req_status = hdev->req_result = 0;
 262
 263	BT_DBG("%s end: err %d", hdev->name, err);
 264
 265	return err;
 266}
 267
 268int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
 269						  unsigned long opt),
 270		 unsigned long opt, u32 timeout, u8 *hci_status)
 271{
 272	int ret;
 273
 274	if (!test_bit(HCI_UP, &hdev->flags))
 275		return -ENETDOWN;
 276
 277	/* Serialize all requests */
 278	hci_req_sync_lock(hdev);
 279	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
 280	hci_req_sync_unlock(hdev);
 281
 282	return ret;
 283}
 284
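     /* Allocate an skb carrying an HCI command header followed by 'plen'
      * bytes of parameters; used by hci_req_add_ev() below to build the
      * commands that get queued on a request.
      */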
 285struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
 286				const void *param)
 287{
 288	int len = HCI_COMMAND_HDR_SIZE + plen;
 289	struct hci_command_hdr *hdr;
 290	struct sk_buff *skb;
 291
 292	skb = bt_skb_alloc(len, GFP_ATOMIC);
 293	if (!skb)
 294		return NULL;
 295
 296	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
 297	hdr->opcode = cpu_to_le16(opcode);
 298	hdr->plen   = plen;
 299
 300	if (plen)
 301		skb_put_data(skb, param, plen);
 302
 303	BT_DBG("skb len %d", skb->len);
 304
 305	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
 306	hci_skb_opcode(skb) = opcode;
 307
 308	return skb;
 309}
 310
 311/* Queue a command to an asynchronous HCI request */
 312void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
 313		    const void *param, u8 event)
 314{
 315	struct hci_dev *hdev = req->hdev;
 316	struct sk_buff *skb;
 317
 318	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
 319
 320	/* If an error occurred during request building, there is no point in
 321	 * queueing the HCI command. We can simply return.
 322	 */
 323	if (req->err)
 324		return;
 325
 326	skb = hci_prepare_cmd(hdev, opcode, plen, param);
 327	if (!skb) {
 328		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
 329			   opcode);
 330		req->err = -ENOMEM;
 331		return;
 332	}
 333
 334	if (skb_queue_empty(&req->cmd_q))
 335		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
 336
 337	bt_cb(skb)->hci.req_event = event;
 338
 339	skb_queue_tail(&req->cmd_q, skb);
 340}
 341
 342void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
 343		 const void *param)
 344{
 345	hci_req_add_ev(req, opcode, plen, param, 0);
 346}
 347
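     /* Queue Write Page Scan Activity/Type commands so incoming BR/EDR
      * connections are detected faster: interlaced scanning with a 160 ms
      * interval when 'enable' is set, the standard 1.28 s interval
      * otherwise.  Commands are only queued if the values actually change.
      */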
 348void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
 349{
 350	struct hci_dev *hdev = req->hdev;
 351	struct hci_cp_write_page_scan_activity acp;
 352	u8 type;
 353
 354	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 355		return;
 356
 357	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 358		return;
 359
 360	if (enable) {
 361		type = PAGE_SCAN_TYPE_INTERLACED;
 362
 363		/* 160 msec page scan interval */
 364		acp.interval = cpu_to_le16(0x0100);
 365	} else {
 366		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
 367
 368		/* default 1.28 sec page scan */
 369		acp.interval = cpu_to_le16(0x0800);
 370	}
 371
 372	acp.window = cpu_to_le16(0x0012);
 373
 374	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
 375	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
 376		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
 377			    sizeof(acp), &acp);
 378
 379	if (hdev->page_scan_type != type)
 380		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 381}
 382
 383/* This function controls the background scanning based on hdev->pend_le_conns
  384 * list. If there are pending LE connections we start the background scanning,
 385 * otherwise we stop it.
 386 *
  387 * This function requires that the caller holds hdev->lock.
 388 */
 389static void __hci_update_background_scan(struct hci_request *req)
 390{
 391	struct hci_dev *hdev = req->hdev;
 392
 393	if (!test_bit(HCI_UP, &hdev->flags) ||
 394	    test_bit(HCI_INIT, &hdev->flags) ||
 395	    hci_dev_test_flag(hdev, HCI_SETUP) ||
 396	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 397	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
 398	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 399		return;
 400
 401	/* No point in doing scanning if LE support hasn't been enabled */
 402	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 403		return;
 404
 405	/* If discovery is active don't interfere with it */
 406	if (hdev->discovery.state != DISCOVERY_STOPPED)
 407		return;
 408
 409	/* Reset RSSI and UUID filters when starting background scanning
 410	 * since these filters are meant for service discovery only.
 411	 *
 412	 * The Start Discovery and Start Service Discovery operations
  413	 * ensure that proper values are set for the RSSI threshold and UUID
 414	 * filter list. So it is safe to just reset them here.
 415	 */
 416	hci_discovery_filter_clear(hdev);
 417
 418	if (list_empty(&hdev->pend_le_conns) &&
 419	    list_empty(&hdev->pend_le_reports)) {
  420		/* If there are no pending LE connections or devices
 421		 * to be scanned for, we should stop the background
 422		 * scanning.
 423		 */
 424
 425		/* If controller is not scanning we are done. */
 426		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
 427			return;
 428
 429		hci_req_add_le_scan_disable(req);
 430
 431		BT_DBG("%s stopping background scanning", hdev->name);
 432	} else {
 433		/* If there is at least one pending LE connection, we should
 434		 * keep the background scan running.
 435		 */
 436
 437		/* If controller is connecting, we should not start scanning
 438		 * since some controllers are not able to scan and connect at
 439		 * the same time.
 440		 */
 441		if (hci_lookup_le_connect(hdev))
 442			return;
 443
 444		/* If controller is currently scanning, we stop it to ensure we
 445		 * don't miss any advertising (due to duplicates filter).
 446		 */
 447		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 448			hci_req_add_le_scan_disable(req);
 449
 450		hci_req_add_le_passive_scan(req);
 451
 452		BT_DBG("%s starting background scanning", hdev->name);
 453	}
 454}
 455
 456void __hci_req_update_name(struct hci_request *req)
 457{
 458	struct hci_dev *hdev = req->hdev;
 459	struct hci_cp_write_local_name cp;
 460
 461	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
 462
 463	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 464}
 465
 466#define PNP_INFO_SVCLASS_ID		0x1200
 467
 468static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 469{
 470	u8 *ptr = data, *uuids_start = NULL;
 471	struct bt_uuid *uuid;
 472
 473	if (len < 4)
 474		return ptr;
 475
 476	list_for_each_entry(uuid, &hdev->uuids, list) {
 477		u16 uuid16;
 478
 479		if (uuid->size != 16)
 480			continue;
 481
 482		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
 483		if (uuid16 < 0x1100)
 484			continue;
 485
 486		if (uuid16 == PNP_INFO_SVCLASS_ID)
 487			continue;
 488
 489		if (!uuids_start) {
 490			uuids_start = ptr;
 491			uuids_start[0] = 1;
 492			uuids_start[1] = EIR_UUID16_ALL;
 493			ptr += 2;
 494		}
 495
 496		/* Stop if not enough space to put next UUID */
 497		if ((ptr - data) + sizeof(u16) > len) {
 498			uuids_start[1] = EIR_UUID16_SOME;
 499			break;
 500		}
 501
 502		*ptr++ = (uuid16 & 0x00ff);
 503		*ptr++ = (uuid16 & 0xff00) >> 8;
 504		uuids_start[0] += sizeof(uuid16);
 505	}
 506
 507	return ptr;
 508}
 509
 510static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 511{
 512	u8 *ptr = data, *uuids_start = NULL;
 513	struct bt_uuid *uuid;
 514
 515	if (len < 6)
 516		return ptr;
 517
 518	list_for_each_entry(uuid, &hdev->uuids, list) {
 519		if (uuid->size != 32)
 520			continue;
 521
 522		if (!uuids_start) {
 523			uuids_start = ptr;
 524			uuids_start[0] = 1;
 525			uuids_start[1] = EIR_UUID32_ALL;
 526			ptr += 2;
 527		}
 528
 529		/* Stop if not enough space to put next UUID */
 530		if ((ptr - data) + sizeof(u32) > len) {
 531			uuids_start[1] = EIR_UUID32_SOME;
 532			break;
 533		}
 534
 535		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
 536		ptr += sizeof(u32);
 537		uuids_start[0] += sizeof(u32);
 538	}
 539
 540	return ptr;
 541}
 542
 543static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 544{
 545	u8 *ptr = data, *uuids_start = NULL;
 546	struct bt_uuid *uuid;
 547
 548	if (len < 18)
 549		return ptr;
 550
 551	list_for_each_entry(uuid, &hdev->uuids, list) {
 552		if (uuid->size != 128)
 553			continue;
 554
 555		if (!uuids_start) {
 556			uuids_start = ptr;
 557			uuids_start[0] = 1;
 558			uuids_start[1] = EIR_UUID128_ALL;
 559			ptr += 2;
 560		}
 561
 562		/* Stop if not enough space to put next UUID */
 563		if ((ptr - data) + 16 > len) {
 564			uuids_start[1] = EIR_UUID128_SOME;
 565			break;
 566		}
 567
 568		memcpy(ptr, uuid->uuid, 16);
 569		ptr += 16;
 570		uuids_start[0] += 16;
 571	}
 572
 573	return ptr;
 574}
 575
 576static void create_eir(struct hci_dev *hdev, u8 *data)
 577{
 578	u8 *ptr = data;
 579	size_t name_len;
 580
 581	name_len = strlen(hdev->dev_name);
 582
 583	if (name_len > 0) {
 584		/* EIR Data type */
 585		if (name_len > 48) {
 586			name_len = 48;
 587			ptr[1] = EIR_NAME_SHORT;
 588		} else
 589			ptr[1] = EIR_NAME_COMPLETE;
 590
 591		/* EIR Data length */
 592		ptr[0] = name_len + 1;
 593
 594		memcpy(ptr + 2, hdev->dev_name, name_len);
 595
 596		ptr += (name_len + 2);
 597	}
 598
 599	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
 600		ptr[0] = 2;
 601		ptr[1] = EIR_TX_POWER;
 602		ptr[2] = (u8) hdev->inq_tx_power;
 603
 604		ptr += 3;
 605	}
 606
 607	if (hdev->devid_source > 0) {
 608		ptr[0] = 9;
 609		ptr[1] = EIR_DEVICE_ID;
 610
 611		put_unaligned_le16(hdev->devid_source, ptr + 2);
 612		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
 613		put_unaligned_le16(hdev->devid_product, ptr + 6);
 614		put_unaligned_le16(hdev->devid_version, ptr + 8);
 615
 616		ptr += 10;
 617	}
 618
 619	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 620	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 621	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 622}
 623
 624void __hci_req_update_eir(struct hci_request *req)
 625{
 626	struct hci_dev *hdev = req->hdev;
 627	struct hci_cp_write_eir cp;
 628
 629	if (!hdev_is_powered(hdev))
 630		return;
 631
 632	if (!lmp_ext_inq_capable(hdev))
 633		return;
 634
 635	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
 636		return;
 637
 638	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
 639		return;
 640
 641	memset(&cp, 0, sizeof(cp));
 642
 643	create_eir(hdev, cp.data);
 644
 645	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
 646		return;
 647
 648	memcpy(hdev->eir, cp.data, sizeof(cp.data));
 649
 650	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 651}
 652
 653void hci_req_add_le_scan_disable(struct hci_request *req)
 654{
 655	struct hci_dev *hdev = req->hdev;
 656
 657	if (use_ext_scan(hdev)) {
 658		struct hci_cp_le_set_ext_scan_enable cp;
 659
 660		memset(&cp, 0, sizeof(cp));
 661		cp.enable = LE_SCAN_DISABLE;
 662		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
 663			    &cp);
 664	} else {
 665		struct hci_cp_le_set_scan_enable cp;
 666
 667		memset(&cp, 0, sizeof(cp));
 668		cp.enable = LE_SCAN_DISABLE;
 669		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 670	}
 671}
 672
 673static void add_to_white_list(struct hci_request *req,
 674			      struct hci_conn_params *params)
 675{
 676	struct hci_cp_le_add_to_white_list cp;
 677
 678	cp.bdaddr_type = params->addr_type;
 679	bacpy(&cp.bdaddr, &params->addr);
 680
 681	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
 682}
 683
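     /* Bring the controller white list in sync with the pend_le_conns and
      * pend_le_reports lists and return the scan filter policy to use:
      * 0x01 when the white list can be used, 0x00 (accept all advertising)
      * when it cannot, e.g. because it is too small or an entry needs RPAs.
      */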
 684static u8 update_white_list(struct hci_request *req)
 685{
 686	struct hci_dev *hdev = req->hdev;
 687	struct hci_conn_params *params;
 688	struct bdaddr_list *b;
 689	uint8_t white_list_entries = 0;
 690
 691	/* Go through the current white list programmed into the
 692	 * controller one by one and check if that address is still
 693	 * in the list of pending connections or list of devices to
 694	 * report. If not present in either list, then queue the
 695	 * command to remove it from the controller.
 696	 */
 697	list_for_each_entry(b, &hdev->le_white_list, list) {
 698		/* If the device is neither in pend_le_conns nor
 699		 * pend_le_reports then remove it from the whitelist.
 700		 */
 701		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
 702					       &b->bdaddr, b->bdaddr_type) &&
 703		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
 704					       &b->bdaddr, b->bdaddr_type)) {
 705			struct hci_cp_le_del_from_white_list cp;
 706
 707			cp.bdaddr_type = b->bdaddr_type;
 708			bacpy(&cp.bdaddr, &b->bdaddr);
 709
 710			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
 711				    sizeof(cp), &cp);
 712			continue;
 713		}
 714
 715		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
 716			/* White list can not be used with RPAs */
 717			return 0x00;
 718		}
 719
 720		white_list_entries++;
 721	}
 722
  723	/* Since all white list entries that are no longer valid have
  724	 * been removed, walk through the list of pending connections
 725	 * and ensure that any new device gets programmed into
 726	 * the controller.
 727	 *
 728	 * If the list of the devices is larger than the list of
 729	 * available white list entries in the controller, then
  730	 * just abort and return the filter policy value to not use the
 731	 * white list.
 732	 */
 733	list_for_each_entry(params, &hdev->pend_le_conns, action) {
 734		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
 735					   &params->addr, params->addr_type))
 736			continue;
 737
 738		if (white_list_entries >= hdev->le_white_list_size) {
 739			/* Select filter policy to accept all advertising */
 740			return 0x00;
 741		}
 742
 743		if (hci_find_irk_by_addr(hdev, &params->addr,
 744					 params->addr_type)) {
 745			/* White list can not be used with RPAs */
 746			return 0x00;
 747		}
 748
 749		white_list_entries++;
 750		add_to_white_list(req, params);
 751	}
 752
 753	/* After adding all new pending connections, walk through
 754	 * the list of pending reports and also add these to the
 755	 * white list if there is still space.
 756	 */
 757	list_for_each_entry(params, &hdev->pend_le_reports, action) {
 758		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
 759					   &params->addr, params->addr_type))
 760			continue;
 761
 762		if (white_list_entries >= hdev->le_white_list_size) {
 763			/* Select filter policy to accept all advertising */
 764			return 0x00;
 765		}
 766
 767		if (hci_find_irk_by_addr(hdev, &params->addr,
 768					 params->addr_type)) {
 769			/* White list can not be used with RPAs */
 770			return 0x00;
 771		}
 772
 773		white_list_entries++;
 774		add_to_white_list(req, params);
 775	}
 776
 777	/* Select filter policy to use white list */
 778	return 0x01;
 779}
 780
 781static bool scan_use_rpa(struct hci_dev *hdev)
 782{
 783	return hci_dev_test_flag(hdev, HCI_PRIVACY);
 784}
 785
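     /* Queue the scan parameter and scan enable commands, using the
      * extended (per-PHY) variants when the controller supports extended
      * scanning.
      */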
 786static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
 787			       u16 window, u8 own_addr_type, u8 filter_policy)
 788{
 789	struct hci_dev *hdev = req->hdev;
 790
 791	/* Use ext scanning if set ext scan param and ext scan enable is
 792	 * supported
 793	 */
 794	if (use_ext_scan(hdev)) {
 795		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
 796		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
 797		struct hci_cp_le_scan_phy_params *phy_params;
 798		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
 799		u32 plen;
 800
 801		ext_param_cp = (void *)data;
 802		phy_params = (void *)ext_param_cp->data;
 803
 804		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
 805		ext_param_cp->own_addr_type = own_addr_type;
 806		ext_param_cp->filter_policy = filter_policy;
 807
 808		plen = sizeof(*ext_param_cp);
 809
 810		if (scan_1m(hdev) || scan_2m(hdev)) {
 811			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
 812
 813			memset(phy_params, 0, sizeof(*phy_params));
 814			phy_params->type = type;
 815			phy_params->interval = cpu_to_le16(interval);
 816			phy_params->window = cpu_to_le16(window);
 817
 818			plen += sizeof(*phy_params);
 819			phy_params++;
 820		}
 821
 822		if (scan_coded(hdev)) {
 823			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
 824
 825			memset(phy_params, 0, sizeof(*phy_params));
 826			phy_params->type = type;
 827			phy_params->interval = cpu_to_le16(interval);
 828			phy_params->window = cpu_to_le16(window);
 829
 830			plen += sizeof(*phy_params);
 831			phy_params++;
 832		}
 833
 834		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
 835			    plen, ext_param_cp);
 836
 837		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
 838		ext_enable_cp.enable = LE_SCAN_ENABLE;
 839		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
 840
 841		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
 842			    sizeof(ext_enable_cp), &ext_enable_cp);
 843	} else {
 844		struct hci_cp_le_set_scan_param param_cp;
 845		struct hci_cp_le_set_scan_enable enable_cp;
 846
 847		memset(&param_cp, 0, sizeof(param_cp));
 848		param_cp.type = type;
 849		param_cp.interval = cpu_to_le16(interval);
 850		param_cp.window = cpu_to_le16(window);
 851		param_cp.own_address_type = own_addr_type;
 852		param_cp.filter_policy = filter_policy;
 853		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
 854			    &param_cp);
 855
 856		memset(&enable_cp, 0, sizeof(enable_cp));
 857		enable_cp.enable = LE_SCAN_ENABLE;
 858		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
 859		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
 860			    &enable_cp);
 861	}
 862}
 863
 864void hci_req_add_le_passive_scan(struct hci_request *req)
 865{
 866	struct hci_dev *hdev = req->hdev;
 867	u8 own_addr_type;
 868	u8 filter_policy;
 869
  870	/* Set require_privacy to false since no SCAN_REQ are sent
  871	 * during passive scanning. Not using a non-resolvable address
 872	 * here is important so that peer devices using direct
 873	 * advertising with our address will be correctly reported
 874	 * by the controller.
 875	 */
 876	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
 877				      &own_addr_type))
 878		return;
 879
 880	/* Adding or removing entries from the white list must
 881	 * happen before enabling scanning. The controller does
 882	 * not allow white list modification while scanning.
 883	 */
 884	filter_policy = update_white_list(req);
 885
 886	/* When the controller is using random resolvable addresses and
 887	 * with that having LE privacy enabled, then controllers with
 888	 * Extended Scanner Filter Policies support can now enable support
 889	 * for handling directed advertising.
 890	 *
  891	 * So instead of using filter policies 0x00 (no whitelist)
 892	 * and 0x01 (whitelist enabled) use the new filter policies
 893	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
 894	 */
 895	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
 896	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
 897		filter_policy |= 0x02;
 898
 899	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
 900			   hdev->le_scan_window, own_addr_type, filter_policy);
 901}
 902
 903static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
 904{
 905	struct adv_info *adv_instance;
 906
 907	/* Ignore instance 0 */
 908	if (instance == 0x00)
 909		return 0;
 910
 911	adv_instance = hci_find_adv_instance(hdev, instance);
 912	if (!adv_instance)
 913		return 0;
 914
 915	/* TODO: Take into account the "appearance" and "local-name" flags here.
 916	 * These are currently being ignored as they are not supported.
 917	 */
 918	return adv_instance->scan_rsp_len;
 919}
 920
 921static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
 922{
 923	u8 instance = hdev->cur_adv_instance;
 924	struct adv_info *adv_instance;
 925
 926	/* Ignore instance 0 */
 927	if (instance == 0x00)
 928		return 0;
 929
 930	adv_instance = hci_find_adv_instance(hdev, instance);
 931	if (!adv_instance)
 932		return 0;
 933
 934	/* TODO: Take into account the "appearance" and "local-name" flags here.
 935	 * These are currently being ignored as they are not supported.
 936	 */
 937	return adv_instance->scan_rsp_len;
 938}
 939
 940void __hci_req_disable_advertising(struct hci_request *req)
 941{
 942	if (ext_adv_capable(req->hdev)) {
 943		struct hci_cp_le_set_ext_adv_enable cp;
 944
 945		cp.enable = 0x00;
 946		/* Disable all sets since we only support one set at the moment */
 947		cp.num_of_sets = 0x00;
 948
 949		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
 950	} else {
 951		u8 enable = 0x00;
 952
 953		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 954	}
 955}
 956
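     /* Return the MGMT advertising flags for the given instance.  Instance
      * 0x00 has no stored adv_info, so its flags are derived from the
      * global connectable/discoverable device flags instead.
      */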
 957static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
 958{
 959	u32 flags;
 960	struct adv_info *adv_instance;
 961
 962	if (instance == 0x00) {
 963		/* Instance 0 always manages the "Tx Power" and "Flags"
 964		 * fields
 965		 */
 966		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
 967
 968		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
 969		 * corresponds to the "connectable" instance flag.
 970		 */
 971		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
 972			flags |= MGMT_ADV_FLAG_CONNECTABLE;
 973
 974		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
 975			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
 976		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
 977			flags |= MGMT_ADV_FLAG_DISCOV;
 978
 979		return flags;
 980	}
 981
 982	adv_instance = hci_find_adv_instance(hdev, instance);
 983
 984	/* Return 0 when we got an invalid instance identifier. */
 985	if (!adv_instance)
 986		return 0;
 987
 988	return adv_instance->flags;
 989}
 990
 991static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
 992{
 993	/* If privacy is not enabled don't use RPA */
 994	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
 995		return false;
 996
 997	/* If basic privacy mode is enabled use RPA */
 998	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
 999		return true;
1000
1001	/* If limited privacy mode is enabled don't use RPA if we're
1002	 * both discoverable and bondable.
1003	 */
1004	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1005	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1006		return false;
1007
1008	/* We're neither bondable nor discoverable in the limited
1009	 * privacy mode, therefore use RPA.
1010	 */
1011	return true;
1012}
1013
1014static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1015{
1016	/* If there is no connection we are OK to advertise. */
1017	if (hci_conn_num(hdev, LE_LINK) == 0)
1018		return true;
1019
1020	/* Check le_states if there is any connection in slave role. */
1021	if (hdev->conn_hash.le_num_slave > 0) {
1022		/* Slave connection state and non connectable mode bit 20. */
1023		if (!connectable && !(hdev->le_states[2] & 0x10))
1024			return false;
1025
1026		/* Slave connection state and connectable mode bit 38
1027		 * and scannable bit 21.
1028		 */
1029		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1030				    !(hdev->le_states[2] & 0x20)))
1031			return false;
1032	}
1033
1034	/* Check le_states if there is any connection in master role. */
1035	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1036		/* Master connection state and non connectable mode bit 18. */
1037		if (!connectable && !(hdev->le_states[2] & 0x02))
1038			return false;
1039
1040		/* Master connection state and connectable mode bit 35 and
1041		 * scannable 19.
1042		 */
1043		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1044				    !(hdev->le_states[2] & 0x08)))
1045			return false;
1046	}
1047
1048	return true;
1049}
1050
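     /* Queue legacy Set Advertising Parameters and Set Advertising Enable
      * commands for the current advertising instance.  The extended
      * advertising path is handled separately via __hci_req_start_ext_adv().
      */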
1051void __hci_req_enable_advertising(struct hci_request *req)
1052{
1053	struct hci_dev *hdev = req->hdev;
1054	struct hci_cp_le_set_adv_param cp;
1055	u8 own_addr_type, enable = 0x01;
1056	bool connectable;
1057	u16 adv_min_interval, adv_max_interval;
1058	u32 flags;
1059
1060	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1061
1062	/* If the "connectable" instance flag was not set, then choose between
1063	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1064	 */
1065	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1066		      mgmt_get_connectable(hdev);
1067
1068	if (!is_advertising_allowed(hdev, connectable))
1069		return;
1070
1071	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1072		__hci_req_disable_advertising(req);
1073
1074	/* Clear the HCI_LE_ADV bit temporarily so that the
1075	 * hci_update_random_address knows that it's safe to go ahead
1076	 * and write a new random address. The flag will be set back on
1077	 * as soon as the SET_ADV_ENABLE HCI command completes.
1078	 */
1079	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1080
1081	/* Set require_privacy to true only when non-connectable
1082	 * advertising is used. In that case it is fine to use a
1083	 * non-resolvable private address.
1084	 */
1085	if (hci_update_random_address(req, !connectable,
1086				      adv_use_rpa(hdev, flags),
1087				      &own_addr_type) < 0)
1088		return;
1089
1090	memset(&cp, 0, sizeof(cp));
1091
1092	if (connectable) {
1093		cp.type = LE_ADV_IND;
1094
1095		adv_min_interval = hdev->le_adv_min_interval;
1096		adv_max_interval = hdev->le_adv_max_interval;
1097	} else {
1098		if (get_cur_adv_instance_scan_rsp_len(hdev))
1099			cp.type = LE_ADV_SCAN_IND;
1100		else
1101			cp.type = LE_ADV_NONCONN_IND;
1102
1103		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1104		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1105			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1106			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1107		} else {
1108			adv_min_interval = hdev->le_adv_min_interval;
1109			adv_max_interval = hdev->le_adv_max_interval;
1110		}
1111	}
1112
1113	cp.min_interval = cpu_to_le16(adv_min_interval);
1114	cp.max_interval = cpu_to_le16(adv_max_interval);
1115	cp.own_address_type = own_addr_type;
1116	cp.channel_map = hdev->le_adv_channel_map;
1117
1118	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1119
1120	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1121}
1122
1123u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1124{
1125	size_t short_len;
1126	size_t complete_len;
1127
1128	/* no space left for name (+ NULL + type + len) */
1129	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1130		return ad_len;
1131
1132	/* use complete name if present and fits */
1133	complete_len = strlen(hdev->dev_name);
1134	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1135		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1136				       hdev->dev_name, complete_len + 1);
1137
1138	/* use short name if present */
1139	short_len = strlen(hdev->short_name);
1140	if (short_len)
1141		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1142				       hdev->short_name, short_len + 1);
1143
 1144	/* use shortened full name if present; we already know that the name
 1145	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1146	 */
1147	if (complete_len) {
1148		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1149
1150		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1151		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1152
1153		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1154				       sizeof(name));
1155	}
1156
1157	return ad_len;
1158}
1159
1160static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1161{
1162	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1163}
1164
1165static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1166{
1167	u8 scan_rsp_len = 0;
1168
1169	if (hdev->appearance) {
1170		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1171	}
1172
1173	return append_local_name(hdev, ptr, scan_rsp_len);
1174}
1175
1176static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1177					u8 *ptr)
1178{
1179	struct adv_info *adv_instance;
1180	u32 instance_flags;
1181	u8 scan_rsp_len = 0;
1182
1183	adv_instance = hci_find_adv_instance(hdev, instance);
1184	if (!adv_instance)
1185		return 0;
1186
1187	instance_flags = adv_instance->flags;
1188
1189	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1190		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1191	}
1192
1193	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1194	       adv_instance->scan_rsp_len);
1195
1196	scan_rsp_len += adv_instance->scan_rsp_len;
1197
1198	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1199		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1200
1201	return scan_rsp_len;
1202}
1203
1204void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1205{
1206	struct hci_dev *hdev = req->hdev;
1207	u8 len;
1208
1209	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1210		return;
1211
1212	if (ext_adv_capable(hdev)) {
1213		struct hci_cp_le_set_ext_scan_rsp_data cp;
1214
1215		memset(&cp, 0, sizeof(cp));
1216
1217		if (instance)
1218			len = create_instance_scan_rsp_data(hdev, instance,
1219							    cp.data);
1220		else
1221			len = create_default_scan_rsp_data(hdev, cp.data);
1222
1223		if (hdev->scan_rsp_data_len == len &&
1224		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1225			return;
1226
1227		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1228		hdev->scan_rsp_data_len = len;
1229
1230		cp.handle = 0;
1231		cp.length = len;
1232		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1233		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1234
1235		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1236			    &cp);
1237	} else {
1238		struct hci_cp_le_set_scan_rsp_data cp;
1239
1240		memset(&cp, 0, sizeof(cp));
1241
1242		if (instance)
1243			len = create_instance_scan_rsp_data(hdev, instance,
1244							    cp.data);
1245		else
1246			len = create_default_scan_rsp_data(hdev, cp.data);
1247
1248		if (hdev->scan_rsp_data_len == len &&
1249		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1250			return;
1251
1252		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1253		hdev->scan_rsp_data_len = len;
1254
1255		cp.length = len;
1256
1257		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1258	}
1259}
1260
1261static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1262{
1263	struct adv_info *adv_instance = NULL;
1264	u8 ad_len = 0, flags = 0;
1265	u32 instance_flags;
1266
1267	/* Return 0 when the current instance identifier is invalid. */
1268	if (instance) {
1269		adv_instance = hci_find_adv_instance(hdev, instance);
1270		if (!adv_instance)
1271			return 0;
1272	}
1273
1274	instance_flags = get_adv_instance_flags(hdev, instance);
1275
1276	/* The Add Advertising command allows userspace to set both the general
1277	 * and limited discoverable flags.
1278	 */
1279	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1280		flags |= LE_AD_GENERAL;
1281
1282	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1283		flags |= LE_AD_LIMITED;
1284
1285	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1286		flags |= LE_AD_NO_BREDR;
1287
1288	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1289		/* If a discovery flag wasn't provided, simply use the global
1290		 * settings.
1291		 */
1292		if (!flags)
1293			flags |= mgmt_get_adv_discov_flags(hdev);
1294
1295		/* If flags would still be empty, then there is no need to
 1296		 * include the "Flags" AD field.
1297		 */
1298		if (flags) {
1299			ptr[0] = 0x02;
1300			ptr[1] = EIR_FLAGS;
1301			ptr[2] = flags;
1302
1303			ad_len += 3;
1304			ptr += 3;
1305		}
1306	}
1307
1308	if (adv_instance) {
1309		memcpy(ptr, adv_instance->adv_data,
1310		       adv_instance->adv_data_len);
1311		ad_len += adv_instance->adv_data_len;
1312		ptr += adv_instance->adv_data_len;
1313	}
1314
1315	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1316		s8 adv_tx_power;
1317
1318		if (ext_adv_capable(hdev)) {
1319			if (adv_instance)
1320				adv_tx_power = adv_instance->tx_power;
1321			else
1322				adv_tx_power = hdev->adv_tx_power;
1323		} else {
1324			adv_tx_power = hdev->adv_tx_power;
1325		}
1326
1327		/* Provide Tx Power only if we can provide a valid value for it */
1328		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1329			ptr[0] = 0x02;
1330			ptr[1] = EIR_TX_POWER;
1331			ptr[2] = (u8)adv_tx_power;
1332
1333			ad_len += 3;
1334			ptr += 3;
1335		}
1336	}
1337
1338	return ad_len;
1339}
1340
1341void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1342{
1343	struct hci_dev *hdev = req->hdev;
1344	u8 len;
1345
1346	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347		return;
1348
1349	if (ext_adv_capable(hdev)) {
1350		struct hci_cp_le_set_ext_adv_data cp;
1351
1352		memset(&cp, 0, sizeof(cp));
1353
1354		len = create_instance_adv_data(hdev, instance, cp.data);
1355
1356		/* There's nothing to do if the data hasn't changed */
1357		if (hdev->adv_data_len == len &&
1358		    memcmp(cp.data, hdev->adv_data, len) == 0)
1359			return;
1360
1361		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1362		hdev->adv_data_len = len;
1363
1364		cp.length = len;
1365		cp.handle = 0;
1366		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1367		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1368
1369		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1370	} else {
1371		struct hci_cp_le_set_adv_data cp;
1372
1373		memset(&cp, 0, sizeof(cp));
1374
1375		len = create_instance_adv_data(hdev, instance, cp.data);
1376
1377		/* There's nothing to do if the data hasn't changed */
1378		if (hdev->adv_data_len == len &&
1379		    memcmp(cp.data, hdev->adv_data, len) == 0)
1380			return;
1381
1382		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1383		hdev->adv_data_len = len;
1384
1385		cp.length = len;
1386
1387		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1388	}
1389}
1390
1391int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1392{
1393	struct hci_request req;
1394
1395	hci_req_init(&req, hdev);
1396	__hci_req_update_adv_data(&req, instance);
1397
1398	return hci_req_run(&req, NULL);
1399}
1400
1401static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1402{
1403	BT_DBG("%s status %u", hdev->name, status);
1404}
1405
1406void hci_req_reenable_advertising(struct hci_dev *hdev)
1407{
1408	struct hci_request req;
1409
1410	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1411	    list_empty(&hdev->adv_instances))
1412		return;
1413
1414	hci_req_init(&req, hdev);
1415
1416	if (hdev->cur_adv_instance) {
1417		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1418						true);
1419	} else {
1420		if (ext_adv_capable(hdev)) {
1421			__hci_req_start_ext_adv(&req, 0x00);
1422		} else {
1423			__hci_req_update_adv_data(&req, 0x00);
1424			__hci_req_update_scan_rsp_data(&req, 0x00);
1425			__hci_req_enable_advertising(&req);
1426		}
1427	}
1428
1429	hci_req_run(&req, adv_enable_complete);
1430}
1431
1432static void adv_timeout_expire(struct work_struct *work)
1433{
1434	struct hci_dev *hdev = container_of(work, struct hci_dev,
1435					    adv_instance_expire.work);
1436
1437	struct hci_request req;
1438	u8 instance;
1439
1440	BT_DBG("%s", hdev->name);
1441
1442	hci_dev_lock(hdev);
1443
1444	hdev->adv_instance_timeout = 0;
1445
1446	instance = hdev->cur_adv_instance;
1447	if (instance == 0x00)
1448		goto unlock;
1449
1450	hci_req_init(&req, hdev);
1451
1452	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1453
1454	if (list_empty(&hdev->adv_instances))
1455		__hci_req_disable_advertising(&req);
1456
1457	hci_req_run(&req, NULL);
1458
1459unlock:
1460	hci_dev_unlock(hdev);
1461}
1462
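     /* Pick the own-address type (and the random address to program, if
      * any) for extended advertising: an RPA when use_rpa is set, a freshly
      * generated non-resolvable address when privacy is required, and the
      * public address otherwise.
      */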
1463int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1464			   bool use_rpa, struct adv_info *adv_instance,
1465			   u8 *own_addr_type, bdaddr_t *rand_addr)
1466{
1467	int err;
1468
1469	bacpy(rand_addr, BDADDR_ANY);
1470
1471	/* If privacy is enabled use a resolvable private address. If
1472	 * current RPA has expired then generate a new one.
1473	 */
1474	if (use_rpa) {
1475		int to;
1476
1477		*own_addr_type = ADDR_LE_DEV_RANDOM;
1478
1479		if (adv_instance) {
1480			if (!adv_instance->rpa_expired &&
1481			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
1482				return 0;
1483
1484			adv_instance->rpa_expired = false;
1485		} else {
1486			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1487			    !bacmp(&hdev->random_addr, &hdev->rpa))
1488				return 0;
1489		}
1490
1491		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1492		if (err < 0) {
1493			BT_ERR("%s failed to generate new RPA", hdev->name);
1494			return err;
1495		}
1496
1497		bacpy(rand_addr, &hdev->rpa);
1498
1499		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1500		if (adv_instance)
1501			queue_delayed_work(hdev->workqueue,
1502					   &adv_instance->rpa_expired_cb, to);
1503		else
1504			queue_delayed_work(hdev->workqueue,
1505					   &hdev->rpa_expired, to);
1506
1507		return 0;
1508	}
1509
1510	/* In case of required privacy without resolvable private address,
 1511	 * use a non-resolvable private address. This is useful for
1512	 * non-connectable advertising.
1513	 */
1514	if (require_privacy) {
1515		bdaddr_t nrpa;
1516
1517		while (true) {
1518			/* The non-resolvable private address is generated
 1519			 * from six random bytes with the two most significant
1520			 * bits cleared.
1521			 */
1522			get_random_bytes(&nrpa, 6);
1523			nrpa.b[5] &= 0x3f;
1524
1525			/* The non-resolvable private address shall not be
1526			 * equal to the public address.
1527			 */
1528			if (bacmp(&hdev->bdaddr, &nrpa))
1529				break;
1530		}
1531
1532		*own_addr_type = ADDR_LE_DEV_RANDOM;
1533		bacpy(rand_addr, &nrpa);
1534
1535		return 0;
1536	}
1537
1538	/* No privacy so use a public address. */
1539	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1540
1541	return 0;
1542}
1543
1544void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1545{
1546	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1547}
1548
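     /* Queue the Set Extended Advertising Parameters command (and, when a
      * random address is used, Set Advertising Set Random Address) for the
      * given instance.  Instance 0x00 is the default instance without a
      * stored adv_info.
      */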
1549int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1550{
1551	struct hci_cp_le_set_ext_adv_params cp;
1552	struct hci_dev *hdev = req->hdev;
1553	bool connectable;
1554	u32 flags;
1555	bdaddr_t random_addr;
1556	u8 own_addr_type;
1557	int err;
1558	struct adv_info *adv_instance;
1559	bool secondary_adv;
1560	/* In ext adv set param interval is 3 octets */
1561	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1562
1563	if (instance > 0) {
1564		adv_instance = hci_find_adv_instance(hdev, instance);
1565		if (!adv_instance)
1566			return -EINVAL;
1567	} else {
1568		adv_instance = NULL;
1569	}
1570
1571	flags = get_adv_instance_flags(hdev, instance);
1572
1573	/* If the "connectable" instance flag was not set, then choose between
1574	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1575	 */
1576	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1577		      mgmt_get_connectable(hdev);
1578
1579	if (!is_advertising_allowed(hdev, connectable))
1580		return -EPERM;
1581
1582	/* Set require_privacy to true only when non-connectable
1583	 * advertising is used. In that case it is fine to use a
1584	 * non-resolvable private address.
1585	 */
1586	err = hci_get_random_address(hdev, !connectable,
1587				     adv_use_rpa(hdev, flags), adv_instance,
1588				     &own_addr_type, &random_addr);
1589	if (err < 0)
1590		return err;
1591
1592	memset(&cp, 0, sizeof(cp));
1593
1594	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1595	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1596
1597	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1598
1599	if (connectable) {
1600		if (secondary_adv)
1601			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1602		else
1603			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1604	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1605		if (secondary_adv)
1606			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1607		else
1608			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1609	} else {
1610		if (secondary_adv)
1611			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1612		else
1613			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1614	}
1615
1616	cp.own_addr_type = own_addr_type;
1617	cp.channel_map = hdev->le_adv_channel_map;
1618	cp.tx_power = 127;
1619	cp.handle = instance;
1620
1621	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1622		cp.primary_phy = HCI_ADV_PHY_1M;
1623		cp.secondary_phy = HCI_ADV_PHY_2M;
1624	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1625		cp.primary_phy = HCI_ADV_PHY_CODED;
1626		cp.secondary_phy = HCI_ADV_PHY_CODED;
1627	} else {
1628		/* In all other cases use 1M */
1629		cp.primary_phy = HCI_ADV_PHY_1M;
1630		cp.secondary_phy = HCI_ADV_PHY_1M;
1631	}
1632
1633	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1634
1635	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1636	    bacmp(&random_addr, BDADDR_ANY)) {
1637		struct hci_cp_le_set_adv_set_rand_addr cp;
1638
1639		/* Check if random address need to be updated */
1640		if (adv_instance) {
1641			if (!bacmp(&random_addr, &adv_instance->random_addr))
1642				return 0;
1643		} else {
1644			if (!bacmp(&random_addr, &hdev->random_addr))
1645				return 0;
1646		}
1647
1648		memset(&cp, 0, sizeof(cp));
1649
1650		cp.handle = 0;
1651		bacpy(&cp.bdaddr, &random_addr);
1652
1653		hci_req_add(req,
1654			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1655			    sizeof(cp), &cp);
1656	}
1657
1658	return 0;
1659}
1660
1661int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1662{
1663	struct hci_dev *hdev = req->hdev;
1664	struct hci_cp_le_set_ext_adv_enable *cp;
1665	struct hci_cp_ext_adv_set *adv_set;
1666	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1667	struct adv_info *adv_instance;
1668
1669	if (instance > 0) {
1670		adv_instance = hci_find_adv_instance(hdev, instance);
1671		if (!adv_instance)
1672			return -EINVAL;
1673	} else {
1674		adv_instance = NULL;
1675	}
1676
1677	cp = (void *) data;
1678	adv_set = (void *) cp->data;
1679
1680	memset(cp, 0, sizeof(*cp));
1681
1682	cp->enable = 0x01;
1683	cp->num_of_sets = 0x01;
1684
1685	memset(adv_set, 0, sizeof(*adv_set));
1686
1687	adv_set->handle = instance;
1688
1689	/* Set duration per instance since controller is responsible for
1690	 * scheduling it.
1691	 */
1692	if (adv_instance && adv_instance->duration) {
1693		u16 duration = adv_instance->duration * MSEC_PER_SEC;
1694
1695		/* Time = N * 10 ms */
1696		adv_set->duration = cpu_to_le16(duration / 10);
1697	}
1698
1699	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1700		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1701		    data);
1702
1703	return 0;
1704}
1705
1706int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1707{
1708	struct hci_dev *hdev = req->hdev;
1709	int err;
1710
1711	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1712		__hci_req_disable_advertising(req);
1713
1714	err = __hci_req_setup_ext_adv_instance(req, instance);
1715	if (err < 0)
1716		return err;
1717
1718	__hci_req_update_scan_rsp_data(req, instance);
1719	__hci_req_enable_ext_advertising(req, instance);
1720
1721	return 0;
1722}
1723
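     /* Make 'instance' the current advertising instance and queue the
      * commands needed to (re)start it.  With legacy advertising a delayed
      * work item enforces the instance duration; with extended advertising
      * the controller schedules the duration itself.
      */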
1724int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1725				    bool force)
1726{
1727	struct hci_dev *hdev = req->hdev;
1728	struct adv_info *adv_instance = NULL;
1729	u16 timeout;
1730
1731	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1732	    list_empty(&hdev->adv_instances))
1733		return -EPERM;
1734
1735	if (hdev->adv_instance_timeout)
1736		return -EBUSY;
1737
1738	adv_instance = hci_find_adv_instance(hdev, instance);
1739	if (!adv_instance)
1740		return -ENOENT;
1741
1742	/* A zero timeout means unlimited advertising. As long as there is
1743	 * only one instance, duration should be ignored. We still set a timeout
1744	 * in case further instances are being added later on.
1745	 *
1746	 * If the remaining lifetime of the instance is more than the duration
1747	 * then the timeout corresponds to the duration, otherwise it will be
1748	 * reduced to the remaining instance lifetime.
1749	 */
1750	if (adv_instance->timeout == 0 ||
1751	    adv_instance->duration <= adv_instance->remaining_time)
1752		timeout = adv_instance->duration;
1753	else
1754		timeout = adv_instance->remaining_time;
1755
1756	/* The remaining time is being reduced unless the instance is being
1757	 * advertised without time limit.
1758	 */
1759	if (adv_instance->timeout)
1760		adv_instance->remaining_time =
1761				adv_instance->remaining_time - timeout;
1762
1763	/* Only use work for scheduling instances with legacy advertising */
1764	if (!ext_adv_capable(hdev)) {
1765		hdev->adv_instance_timeout = timeout;
1766		queue_delayed_work(hdev->req_workqueue,
1767			   &hdev->adv_instance_expire,
1768			   msecs_to_jiffies(timeout * 1000));
1769	}
1770
1771	/* If we're just re-scheduling the same instance again then do not
1772	 * execute any HCI commands. This happens when a single instance is
1773	 * being advertised.
1774	 */
1775	if (!force && hdev->cur_adv_instance == instance &&
1776	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1777		return 0;
1778
1779	hdev->cur_adv_instance = instance;
1780	if (ext_adv_capable(hdev)) {
1781		__hci_req_start_ext_adv(req, instance);
1782	} else {
1783		__hci_req_update_adv_data(req, instance);
1784		__hci_req_update_scan_rsp_data(req, instance);
1785		__hci_req_enable_advertising(req);
1786	}
1787
1788	return 0;
1789}
1790
1791static void cancel_adv_timeout(struct hci_dev *hdev)
1792{
1793	if (hdev->adv_instance_timeout) {
1794		hdev->adv_instance_timeout = 0;
1795		cancel_delayed_work(&hdev->adv_instance_expire);
1796	}
1797}
1798
1799/* For a single instance:
1800 * - force == true: The instance will be removed even when its remaining
1801 *   lifetime is not zero.
1802 * - force == false: the instance will be deactivated but kept stored unless
1803 *   the remaining lifetime is zero.
1804 *
1805 * For instance == 0x00:
1806 * - force == true: All instances will be removed regardless of their timeout
1807 *   setting.
1808 * - force == false: Only instances that have a timeout will be removed.
1809 */
1810void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1811				struct hci_request *req, u8 instance,
1812				bool force)
1813{
1814	struct adv_info *adv_instance, *n, *next_instance = NULL;
1815	int err;
1816	u8 rem_inst;
1817
1818	/* Cancel any timeout concerning the removed instance(s). */
1819	if (!instance || hdev->cur_adv_instance == instance)
1820		cancel_adv_timeout(hdev);
1821
1822	/* Get the next instance to advertise BEFORE we remove
1823	 * the current one. This can be the same instance again
1824	 * if there is only one instance.
1825	 */
1826	if (instance && hdev->cur_adv_instance == instance)
1827		next_instance = hci_get_next_instance(hdev, instance);
1828
1829	if (instance == 0x00) {
1830		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1831					 list) {
1832			if (!(force || adv_instance->timeout))
1833				continue;
1834
1835			rem_inst = adv_instance->instance;
1836			err = hci_remove_adv_instance(hdev, rem_inst);
1837			if (!err)
1838				mgmt_advertising_removed(sk, hdev, rem_inst);
1839		}
1840	} else {
1841		adv_instance = hci_find_adv_instance(hdev, instance);
1842
1843		if (force || (adv_instance && adv_instance->timeout &&
1844			      !adv_instance->remaining_time)) {
1845			/* Don't advertise a removed instance. */
1846			if (next_instance &&
1847			    next_instance->instance == instance)
1848				next_instance = NULL;
1849
1850			err = hci_remove_adv_instance(hdev, instance);
1851			if (!err)
1852				mgmt_advertising_removed(sk, hdev, instance);
1853		}
1854	}
1855
1856	if (!req || !hdev_is_powered(hdev) ||
1857	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
1858		return;
1859
1860	if (next_instance)
1861		__hci_req_schedule_adv_instance(req, next_instance->instance,
1862						false);
1863}
1864
1865static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1866{
1867	struct hci_dev *hdev = req->hdev;
1868
1869	/* If we're advertising or initiating an LE connection we can't
1870	 * go ahead and change the random address at this time. This is
1871	 * because the eventual initiator address used for the
1872	 * subsequently created connection will be undefined (some
1873	 * controllers use the new address and others the one we had
1874	 * when the operation started).
1875	 *
1876	 * In this kind of scenario skip the update and let the random
1877	 * address be updated at the next cycle.
1878	 */
1879	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1880	    hci_lookup_le_connect(hdev)) {
1881		BT_DBG("Deferring random address update");
1882		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1883		return;
1884	}
1885
1886	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1887}
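    /* Note: when the update is deferred above, HCI_RPA_EXPIRED remains set,
     * so a later call to hci_update_random_address() will generate and
     * program a fresh RPA once advertising/initiating has stopped.
     */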
1888
1889int hci_update_random_address(struct hci_request *req, bool require_privacy,
1890			      bool use_rpa, u8 *own_addr_type)
1891{
1892	struct hci_dev *hdev = req->hdev;
1893	int err;
1894
1895	/* If privacy is enabled, use a resolvable private address. If
1896	 * the current RPA has expired or something other than the
1897	 * current RPA is in use, then generate a new one.
1898	 */
1899	if (use_rpa) {
1900		int to;
1901
1902		*own_addr_type = ADDR_LE_DEV_RANDOM;
1903
1904		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1905		    !bacmp(&hdev->random_addr, &hdev->rpa))
1906			return 0;
1907
1908		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1909		if (err < 0) {
1910			bt_dev_err(hdev, "failed to generate new RPA");
1911			return err;
1912		}
1913
1914		set_random_addr(req, &hdev->rpa);
1915
1916		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1917		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1918
1919		return 0;
1920	}
1921
1922	/* In case privacy is required but no resolvable private address is
1923	 * available, use a non-resolvable private address. This is useful
1924	 * for active scanning and non-connectable advertising.
1925	 */
1926	if (require_privacy) {
1927		bdaddr_t nrpa;
1928
1929		while (true) {
1930			/* The non-resolvable private address is generated
1931			 * from six random bytes with the two most significant
1932			 * bits cleared.
1933			 */
1934			get_random_bytes(&nrpa, 6);
1935			nrpa.b[5] &= 0x3f;
1936
1937			/* The non-resolvable private address shall not be
1938			 * equal to the public address.
1939			 */
1940			if (bacmp(&hdev->bdaddr, &nrpa))
1941				break;
1942		}
1943
1944		*own_addr_type = ADDR_LE_DEV_RANDOM;
1945		set_random_addr(req, &nrpa);
1946		return 0;
1947	}
1948
1949	/* If forcing static address is in use or there is no public
1950	 * address, use the static address as the random address (but
1951	 * skip the HCI command if the current random address is already
1952	 * the static one).
1953	 *
1954	 * In case BR/EDR has been disabled on a dual-mode controller
1955	 * and a static address has been configured, then use that
1956	 * address instead of the public BR/EDR address.
1957	 */
1958	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1959	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1960	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1961	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1962		*own_addr_type = ADDR_LE_DEV_RANDOM;
1963		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1964			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1965				    &hdev->static_addr);
1966		return 0;
1967	}
1968
1969	/* Neither privacy nor static address is being used so use a
1970	 * public address.
1971	 */
1972	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1973
1974	return 0;
1975}
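    /* Rough summary of the address selection above, in order of precedence:
     *
     *   1. use_rpa: resolvable private address, regenerated whenever
     *      HCI_RPA_EXPIRED is set or the controller's random address no
     *      longer matches hdev->rpa.
     *   2. require_privacy: non-resolvable private address; clearing the two
     *      most significant bits of nrpa.b[5] (the most significant address
     *      byte) marks the address as non-resolvable per the Core spec.
     *   3. Forced static address, no public address, or BR/EDR disabled with
     *      a configured static address: static random address.
     *   4. Otherwise: public address.
     */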
1976
1977static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1978{
1979	struct bdaddr_list *b;
1980
1981	list_for_each_entry(b, &hdev->whitelist, list) {
1982		struct hci_conn *conn;
1983
1984		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1985		if (!conn)
1986			return true;
1987
1988		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1989			return true;
1990	}
1991
1992	return false;
1993}
1994
1995void __hci_req_update_scan(struct hci_request *req)
1996{
1997	struct hci_dev *hdev = req->hdev;
1998	u8 scan;
1999
2000	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2001		return;
2002
2003	if (!hdev_is_powered(hdev))
2004		return;
2005
2006	if (mgmt_powering_down(hdev))
2007		return;
2008
2009	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2010	    disconnected_whitelist_entries(hdev))
2011		scan = SCAN_PAGE;
2012	else
2013		scan = SCAN_DISABLED;
2014
2015	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2016		scan |= SCAN_INQUIRY;
2017
2018	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2019	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2020		return;
2021
2022	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2023}
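    /* 'scan' is the bitmask written with Write Scan Enable: SCAN_PAGE makes
     * the device connectable (page scan), SCAN_INQUIRY makes it discoverable
     * (inquiry scan) and SCAN_DISABLED turns both off. The HCI_PSCAN and
     * HCI_ISCAN flag comparison above skips the command when the controller
     * is already in the requested state.
     */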
2024
2025static int update_scan(struct hci_request *req, unsigned long opt)
2026{
2027	hci_dev_lock(req->hdev);
2028	__hci_req_update_scan(req);
2029	hci_dev_unlock(req->hdev);
2030	return 0;
2031}
2032
2033static void scan_update_work(struct work_struct *work)
2034{
2035	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2036
2037	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2038}
2039
2040static int connectable_update(struct hci_request *req, unsigned long opt)
2041{
2042	struct hci_dev *hdev = req->hdev;
2043
2044	hci_dev_lock(hdev);
2045
2046	__hci_req_update_scan(req);
2047
2048	/* If BR/EDR is not enabled and we disable advertising as a
2049	 * by-product of disabling connectable, we need to update the
2050	 * advertising flags.
2051	 */
2052	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2053		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2054
2055	/* Update the advertising parameters if necessary */
2056	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2057	    !list_empty(&hdev->adv_instances)) {
2058		if (ext_adv_capable(hdev))
2059			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2060		else
2061			__hci_req_enable_advertising(req);
2062	}
2063
2064	__hci_update_background_scan(req);
2065
2066	hci_dev_unlock(hdev);
2067
2068	return 0;
2069}
2070
2071static void connectable_update_work(struct work_struct *work)
2072{
2073	struct hci_dev *hdev = container_of(work, struct hci_dev,
2074					    connectable_update);
2075	u8 status;
2076
2077	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2078	mgmt_set_connectable_complete(hdev, status);
2079}
2080
2081static u8 get_service_classes(struct hci_dev *hdev)
2082{
2083	struct bt_uuid *uuid;
2084	u8 val = 0;
2085
2086	list_for_each_entry(uuid, &hdev->uuids, list)
2087		val |= uuid->svc_hint;
2088
2089	return val;
2090}
2091
2092void __hci_req_update_class(struct hci_request *req)
2093{
2094	struct hci_dev *hdev = req->hdev;
2095	u8 cod[3];
2096
2097	BT_DBG("%s", hdev->name);
2098
2099	if (!hdev_is_powered(hdev))
2100		return;
2101
2102	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2103		return;
2104
2105	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2106		return;
2107
2108	cod[0] = hdev->minor_class;
2109	cod[1] = hdev->major_class;
2110	cod[2] = get_service_classes(hdev);
2111
2112	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2113		cod[1] |= 0x20;
2114
2115	if (memcmp(cod, hdev->dev_class, 3) == 0)
2116		return;
2117
2118	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2119}
2120
2121static void write_iac(struct hci_request *req)
2122{
2123	struct hci_dev *hdev = req->hdev;
2124	struct hci_cp_write_current_iac_lap cp;
2125
2126	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2127		return;
2128
2129	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2130		/* Limited discoverable mode */
2131		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2132		cp.iac_lap[0] = 0x00;	/* LIAC */
2133		cp.iac_lap[1] = 0x8b;
2134		cp.iac_lap[2] = 0x9e;
2135		cp.iac_lap[3] = 0x33;	/* GIAC */
2136		cp.iac_lap[4] = 0x8b;
2137		cp.iac_lap[5] = 0x9e;
2138	} else {
2139		/* General discoverable mode */
2140		cp.num_iac = 1;
2141		cp.iac_lap[0] = 0x33;	/* GIAC */
2142		cp.iac_lap[1] = 0x8b;
2143		cp.iac_lap[2] = 0x9e;
2144	}
2145
2146	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2147		    (cp.num_iac * 3) + 1, &cp);
2148}
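    /* The LAPs above are encoded little-endian: 0x00 0x8b 0x9e is the
     * Limited Inquiry Access Code (0x9E8B00) and 0x33 0x8b 0x9e is the
     * General Inquiry Access Code (0x9E8B33). The parameter length passed
     * to hci_req_add() is one byte for num_iac plus three LAP bytes per IAC.
     */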
2149
2150static int discoverable_update(struct hci_request *req, unsigned long opt)
2151{
2152	struct hci_dev *hdev = req->hdev;
2153
2154	hci_dev_lock(hdev);
2155
2156	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2157		write_iac(req);
2158		__hci_req_update_scan(req);
2159		__hci_req_update_class(req);
2160	}
2161
2162	/* Advertising instances don't use the global discoverable setting, so
2163	 * only update AD if advertising was enabled using Set Advertising.
2164	 */
2165	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2166		__hci_req_update_adv_data(req, 0x00);
2167
2168		/* Discoverable mode affects the local advertising
2169		 * address in limited privacy mode.
2170		 */
2171		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2172			if (ext_adv_capable(hdev))
2173				__hci_req_start_ext_adv(req, 0x00);
2174			else
2175				__hci_req_enable_advertising(req);
2176		}
2177	}
2178
2179	hci_dev_unlock(hdev);
2180
2181	return 0;
2182}
2183
2184static void discoverable_update_work(struct work_struct *work)
2185{
2186	struct hci_dev *hdev = container_of(work, struct hci_dev,
2187					    discoverable_update);
2188	u8 status;
2189
2190	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2191	mgmt_set_discoverable_complete(hdev, status);
2192}
2193
2194void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2195		      u8 reason)
2196{
2197	switch (conn->state) {
2198	case BT_CONNECTED:
2199	case BT_CONFIG:
2200		if (conn->type == AMP_LINK) {
2201			struct hci_cp_disconn_phy_link cp;
2202
2203			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2204			cp.reason = reason;
2205			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2206				    &cp);
2207		} else {
2208			struct hci_cp_disconnect dc;
2209
2210			dc.handle = cpu_to_le16(conn->handle);
2211			dc.reason = reason;
2212			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2213		}
2214
2215		conn->state = BT_DISCONN;
2216
2217		break;
2218	case BT_CONNECT:
2219		if (conn->type == LE_LINK) {
2220			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2221				break;
2222			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2223				    0, NULL);
2224		} else if (conn->type == ACL_LINK) {
2225			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2226				break;
2227			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2228				    6, &conn->dst);
2229		}
2230		break;
2231	case BT_CONNECT2:
2232		if (conn->type == ACL_LINK) {
2233			struct hci_cp_reject_conn_req rej;
2234
2235			bacpy(&rej.bdaddr, &conn->dst);
2236			rej.reason = reason;
2237
2238			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2239				    sizeof(rej), &rej);
2240		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2241			struct hci_cp_reject_sync_conn_req rej;
2242
2243			bacpy(&rej.bdaddr, &conn->dst);
2244
2245			/* SCO rejection has its own limited set of
2246			 * allowed error values (0x0D-0x0F) which isn't
2247			 * compatible with most values passed to this
2248			 * function. To be safe, hard-code one of the
2249			 * values that's suitable for SCO.
2250			 */
2251			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2252
2253			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2254				    sizeof(rej), &rej);
2255		}
2256		break;
2257	default:
2258		conn->state = BT_CLOSED;
2259		break;
2260	}
2261}
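    /* Summary of the state handling above:
     *
     *   BT_CONNECTED/BT_CONFIG: Disconnect (or Disconnect Physical Link for
     *                           AMP links), state moved to BT_DISCONN
     *   BT_CONNECT:             LE Create Connection Cancel (unless the LE
     *                           connection is still only being scanned for),
     *                           or Create Connection Cancel for ACL on
     *                           controllers >= Bluetooth 1.2
     *   BT_CONNECT2:            Reject Connection Request (ACL) or Reject
     *                           Synchronous Connection Request (SCO/eSCO)
     *   other states:           no HCI command, state moved to BT_CLOSED
     */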
2262
2263static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2264{
2265	if (status)
2266		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2267}
2268
2269int hci_abort_conn(struct hci_conn *conn, u8 reason)
2270{
2271	struct hci_request req;
2272	int err;
2273
2274	hci_req_init(&req, conn->hdev);
2275
2276	__hci_abort_conn(&req, conn, reason);
2277
2278	err = hci_req_run(&req, abort_conn_complete);
2279	if (err && err != -ENODATA) {
2280		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2281		return err;
2282	}
2283
2284	return 0;
2285}
2286
2287static int update_bg_scan(struct hci_request *req, unsigned long opt)
2288{
2289	hci_dev_lock(req->hdev);
2290	__hci_update_background_scan(req);
2291	hci_dev_unlock(req->hdev);
2292	return 0;
2293}
2294
2295static void bg_scan_update(struct work_struct *work)
2296{
2297	struct hci_dev *hdev = container_of(work, struct hci_dev,
2298					    bg_scan_update);
2299	struct hci_conn *conn;
2300	u8 status;
2301	int err;
2302
2303	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2304	if (!err)
2305		return;
2306
2307	hci_dev_lock(hdev);
2308
2309	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2310	if (conn)
2311		hci_le_conn_failed(conn, status);
2312
2313	hci_dev_unlock(hdev);
2314}
2315
2316static int le_scan_disable(struct hci_request *req, unsigned long opt)
2317{
2318	hci_req_add_le_scan_disable(req);
2319	return 0;
2320}
2321
2322static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2323{
2324	u8 length = opt;
2325	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2326	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2327	struct hci_cp_inquiry cp;
2328
2329	BT_DBG("%s", req->hdev->name);
2330
2331	hci_dev_lock(req->hdev);
2332	hci_inquiry_cache_flush(req->hdev);
2333	hci_dev_unlock(req->hdev);
2334
2335	memset(&cp, 0, sizeof(cp));
2336
2337	if (req->hdev->discovery.limited)
2338		memcpy(&cp.lap, liac, sizeof(cp.lap));
2339	else
2340		memcpy(&cp.lap, giac, sizeof(cp.lap));
2341
2342	cp.length = length;
2343
2344	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2345
2346	return 0;
2347}
2348
2349static void le_scan_disable_work(struct work_struct *work)
2350{
2351	struct hci_dev *hdev = container_of(work, struct hci_dev,
2352					    le_scan_disable.work);
2353	u8 status;
2354
2355	BT_DBG("%s", hdev->name);
2356
2357	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2358		return;
2359
2360	cancel_delayed_work(&hdev->le_scan_restart);
2361
2362	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2363	if (status) {
2364		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2365			   status);
2366		return;
2367	}
2368
2369	hdev->discovery.scan_start = 0;
2370
2371	/* If we were running an LE-only scan, change the discovery state.
2372	 * If we were running both LE and BR/EDR inquiry simultaneously,
2373	 * and BR/EDR inquiry has already finished, stop discovery;
2374	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
2375	 * If a remote device name is still being resolved, do not change
2376	 * the discovery state.
2377	 */
2378
2379	if (hdev->discovery.type == DISCOV_TYPE_LE)
2380		goto discov_stopped;
2381
2382	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2383		return;
2384
2385	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2386		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2387		    hdev->discovery.state != DISCOVERY_RESOLVING)
2388			goto discov_stopped;
2389
2390		return;
2391	}
2392
2393	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2394		     HCI_CMD_TIMEOUT, &status);
2395	if (status) {
2396		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2397		goto discov_stopped;
2398	}
2399
2400	return;
2401
2402discov_stopped:
2403	hci_dev_lock(hdev);
2404	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2405	hci_dev_unlock(hdev);
2406}
2407
2408static int le_scan_restart(struct hci_request *req, unsigned long opt)
2409{
2410	struct hci_dev *hdev = req->hdev;
2411
2412	/* If controller is not scanning we are done. */
2413	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2414		return 0;
2415
2416	hci_req_add_le_scan_disable(req);
2417
2418	if (use_ext_scan(hdev)) {
2419		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2420
2421		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2422		ext_enable_cp.enable = LE_SCAN_ENABLE;
2423		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2424
2425		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2426			    sizeof(ext_enable_cp), &ext_enable_cp);
2427	} else {
2428		struct hci_cp_le_set_scan_enable cp;
2429
2430		memset(&cp, 0, sizeof(cp));
2431		cp.enable = LE_SCAN_ENABLE;
2432		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2433		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2434	}
2435
2436	return 0;
2437}
2438
2439static void le_scan_restart_work(struct work_struct *work)
2440{
2441	struct hci_dev *hdev = container_of(work, struct hci_dev,
2442					    le_scan_restart.work);
2443	unsigned long timeout, duration, scan_start, now;
2444	u8 status;
2445
2446	BT_DBG("%s", hdev->name);
2447
2448	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2449	if (status) {
2450		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2451			   status);
2452		return;
2453	}
2454
2455	hci_dev_lock(hdev);
2456
2457	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2458	    !hdev->discovery.scan_start)
2459		goto unlock;
2460
2461	/* When the scan was started, the hdev->le_scan_disable work was
2462	 * queued to run 'duration' after scan_start. During the scan
2463	 * restart that work was canceled, so queue it again with the
2464	 * remaining timeout to make sure the scan does not run indefinitely.
2465	 */
2466	duration = hdev->discovery.scan_duration;
2467	scan_start = hdev->discovery.scan_start;
2468	now = jiffies;
2469	if (now - scan_start <= duration) {
2470		int elapsed;
2471
2472		if (now >= scan_start)
2473			elapsed = now - scan_start;
2474		else
2475			elapsed = ULONG_MAX - scan_start + now;
2476
2477		timeout = duration - elapsed;
2478	} else {
2479		timeout = 0;
2480	}
2481
2482	queue_delayed_work(hdev->req_workqueue,
2483			   &hdev->le_scan_disable, timeout);
2484
2485unlock:
2486	hci_dev_unlock(hdev);
2487}
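    /* Worked example of the re-arming above (numbers are illustrative and
     * assume HZ == 1000): with scan_duration == 10240 jiffies and a restart
     * happening 4000 jiffies after scan_start, elapsed == 4000 and the
     * disable work is re-queued with timeout == 6240 jiffies, so the total
     * scan time still adds up to the original duration. If the restart
     * happens after the duration has already passed, timeout is 0 and the
     * scan is disabled on the next workqueue run.
     */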
2488
2489static int active_scan(struct hci_request *req, unsigned long opt)
2490{
2491	uint16_t interval = opt;
2492	struct hci_dev *hdev = req->hdev;
2493	u8 own_addr_type;
2494	int err;
2495
2496	BT_DBG("%s", hdev->name);
2497
2498	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2499		hci_dev_lock(hdev);
2500
2501		/* Don't let discovery abort an outgoing connection attempt
2502		 * that's using directed advertising.
2503		 */
2504		if (hci_lookup_le_connect(hdev)) {
2505			hci_dev_unlock(hdev);
2506			return -EBUSY;
2507		}
2508
2509		cancel_adv_timeout(hdev);
2510		hci_dev_unlock(hdev);
2511
2512		__hci_req_disable_advertising(req);
2513	}
2514
2515	/* If controller is scanning, it means the background scanning is
2516	 * running. Thus, we should temporarily stop it in order to set the
2517	 * discovery scanning parameters.
2518	 */
2519	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2520		hci_req_add_le_scan_disable(req);
2521
2522	/* All active scans will be done with either a resolvable private
2523	 * address (when the privacy feature has been enabled) or a
2524	 * non-resolvable private address.
2525	 */
2526	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2527					&own_addr_type);
2528	if (err < 0)
2529		own_addr_type = ADDR_LE_DEV_PUBLIC;
2530
2531	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2532			   own_addr_type, 0);
2533	return 0;
2534}
2535
2536static int interleaved_discov(struct hci_request *req, unsigned long opt)
2537{
2538	int err;
2539
2540	BT_DBG("%s", req->hdev->name);
2541
2542	err = active_scan(req, opt);
2543	if (err)
2544		return err;
2545
2546	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2547}
2548
2549static void start_discovery(struct hci_dev *hdev, u8 *status)
2550{
2551	unsigned long timeout;
2552
2553	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2554
2555	switch (hdev->discovery.type) {
2556	case DISCOV_TYPE_BREDR:
2557		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2558			hci_req_sync(hdev, bredr_inquiry,
2559				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2560				     status);
2561		return;
2562	case DISCOV_TYPE_INTERLEAVED:
2563		/* When running simultaneous discovery, the LE scanning time
2564		 * should occupy the whole discovery time since BR/EDR inquiry
2565		 * and LE scanning are scheduled by the controller.
2566		 *
2567		 * For interleaving discovery in comparison, BR/EDR inquiry
2568		 * and LE scanning are done sequentially with separate
2569		 * timeouts.
2570		 */
2571		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2572			     &hdev->quirks)) {
2573			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2574			/* During simultaneous discovery, we double the LE scan
2575			 * interval to leave the controller some time to do
2576			 * BR/EDR inquiry as well.
2577			 */
2578			hci_req_sync(hdev, interleaved_discov,
2579				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2580				     status);
2581			break;
2582		}
2583
2584		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2585		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2586			     HCI_CMD_TIMEOUT, status);
2587		break;
2588	case DISCOV_TYPE_LE:
2589		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2590		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2591			     HCI_CMD_TIMEOUT, status);
2592		break;
2593	default:
2594		*status = HCI_ERROR_UNSPECIFIED;
2595		return;
2596	}
2597
2598	if (*status)
2599		return;
2600
2601	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2602
2603	/* When service discovery is used and the controller has a
2604	 * strict duplicate filter, it is important to remember the
2605	 * start and duration of the scan. This is required for
2606	 * restarting scanning during the discovery phase.
2607	 */
2608	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2609		     hdev->discovery.result_filtering) {
2610		hdev->discovery.scan_start = jiffies;
2611		hdev->discovery.scan_duration = timeout;
2612	}
2613
2614	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2615			   timeout);
2616}
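    /* The LE part of discovery is time-bounded by queueing le_scan_disable
     * after 'timeout'. scan_start and scan_duration are only recorded when
     * the controller has a strict duplicate filter and result filtering is
     * in use, since that is the case where le_scan_restart_work() needs them
     * to re-arm the disable work with the remaining time.
     */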
2617
2618bool hci_req_stop_discovery(struct hci_request *req)
2619{
2620	struct hci_dev *hdev = req->hdev;
2621	struct discovery_state *d = &hdev->discovery;
2622	struct hci_cp_remote_name_req_cancel cp;
2623	struct inquiry_entry *e;
2624	bool ret = false;
2625
2626	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2627
2628	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2629		if (test_bit(HCI_INQUIRY, &hdev->flags))
2630			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2631
2632		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2633			cancel_delayed_work(&hdev->le_scan_disable);
2634			hci_req_add_le_scan_disable(req);
2635		}
2636
2637		ret = true;
2638	} else {
2639		/* Passive scanning */
2640		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2641			hci_req_add_le_scan_disable(req);
2642			ret = true;
2643		}
2644	}
2645
2646	/* No further actions needed for LE-only discovery */
2647	if (d->type == DISCOV_TYPE_LE)
2648		return ret;
2649
2650	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2651		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2652						     NAME_PENDING);
2653		if (!e)
2654			return ret;
2655
2656		bacpy(&cp.bdaddr, &e->data.bdaddr);
2657		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2658			    &cp);
2659		ret = true;
2660	}
2661
2662	return ret;
2663}
2664
2665static int stop_discovery(struct hci_request *req, unsigned long opt)
2666{
2667	hci_dev_lock(req->hdev);
2668	hci_req_stop_discovery(req);
2669	hci_dev_unlock(req->hdev);
2670
2671	return 0;
2672}
2673
2674static void discov_update(struct work_struct *work)
2675{
2676	struct hci_dev *hdev = container_of(work, struct hci_dev,
2677					    discov_update);
2678	u8 status = 0;
2679
2680	switch (hdev->discovery.state) {
2681	case DISCOVERY_STARTING:
2682		start_discovery(hdev, &status);
2683		mgmt_start_discovery_complete(hdev, status);
2684		if (status)
2685			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2686		else
2687			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2688		break;
2689	case DISCOVERY_STOPPING:
2690		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2691		mgmt_stop_discovery_complete(hdev, status);
2692		if (!status)
2693			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2694		break;
2695	case DISCOVERY_STOPPED:
2696	default:
2697		return;
2698	}
2699}
2700
2701static void discov_off(struct work_struct *work)
2702{
2703	struct hci_dev *hdev = container_of(work, struct hci_dev,
2704					    discov_off.work);
2705
2706	BT_DBG("%s", hdev->name);
2707
2708	hci_dev_lock(hdev);
2709
2710	/* When discoverable timeout triggers, then just make sure
2711	 * the limited discoverable flag is cleared. Even in the case
2712	 * of a timeout triggered from general discoverable, it is
2713	 * safe to unconditionally clear the flag.
2714	 */
2715	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2716	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2717	hdev->discov_timeout = 0;
2718
2719	hci_dev_unlock(hdev);
2720
2721	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2722	mgmt_new_settings(hdev);
2723}
2724
2725static int powered_update_hci(struct hci_request *req, unsigned long opt)
2726{
2727	struct hci_dev *hdev = req->hdev;
2728	u8 link_sec;
2729
2730	hci_dev_lock(hdev);
2731
2732	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2733	    !lmp_host_ssp_capable(hdev)) {
2734		u8 mode = 0x01;
2735
2736		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2737
2738		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2739			u8 support = 0x01;
2740
2741			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2742				    sizeof(support), &support);
2743		}
2744	}
2745
2746	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2747	    lmp_bredr_capable(hdev)) {
2748		struct hci_cp_write_le_host_supported cp;
2749
2750		cp.le = 0x01;
2751		cp.simul = 0x00;
2752
2753		/* Check first if we already have the right
2754		 * host state (host features set)
2755		 */
2756		if (cp.le != lmp_host_le_capable(hdev) ||
2757		    cp.simul != lmp_host_le_br_capable(hdev))
2758			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2759				    sizeof(cp), &cp);
2760	}
2761
2762	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2763		/* Make sure the controller has a good default for
2764		 * advertising data. This also applies to the case
2765		 * where BR/EDR was toggled during the AUTO_OFF phase.
2766		 */
2767		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2768		    list_empty(&hdev->adv_instances)) {
2769			int err;
2770
2771			if (ext_adv_capable(hdev)) {
2772				err = __hci_req_setup_ext_adv_instance(req,
2773								       0x00);
2774				if (!err)
2775					__hci_req_update_scan_rsp_data(req,
2776								       0x00);
2777			} else {
2778				err = 0;
2779				__hci_req_update_adv_data(req, 0x00);
2780				__hci_req_update_scan_rsp_data(req, 0x00);
2781			}
2782
2783			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2784				if (!ext_adv_capable(hdev))
2785					__hci_req_enable_advertising(req);
2786				else if (!err)
2787					__hci_req_enable_ext_advertising(req,
2788									 0x00);
2789			}
2790		} else if (!list_empty(&hdev->adv_instances)) {
2791			struct adv_info *adv_instance;
2792
2793			adv_instance = list_first_entry(&hdev->adv_instances,
2794							struct adv_info, list);
2795			__hci_req_schedule_adv_instance(req,
2796							adv_instance->instance,
2797							true);
2798		}
2799	}
2800
2801	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2802	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2803		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2804			    sizeof(link_sec), &link_sec);
2805
2806	if (lmp_bredr_capable(hdev)) {
2807		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2808			__hci_req_write_fast_connectable(req, true);
2809		else
2810			__hci_req_write_fast_connectable(req, false);
2811		__hci_req_update_scan(req);
2812		__hci_req_update_class(req);
2813		__hci_req_update_name(req);
2814		__hci_req_update_eir(req);
2815	}
2816
2817	hci_dev_unlock(hdev);
2818	return 0;
2819}
2820
2821int __hci_req_hci_power_on(struct hci_dev *hdev)
2822{
2823	/* Register the available SMP channels (BR/EDR and LE) only when
2824	 * successfully powering on the controller. This late
2825	 * registration is required so that LE SMP can clearly decide if
2826	 * the public address or static address is used.
2827	 */
2828	smp_register(hdev);
2829
2830	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2831			      NULL);
2832}
2833
2834void hci_request_setup(struct hci_dev *hdev)
2835{
2836	INIT_WORK(&hdev->discov_update, discov_update);
2837	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2838	INIT_WORK(&hdev->scan_update, scan_update_work);
2839	INIT_WORK(&hdev->connectable_update, connectable_update_work);
2840	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2841	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2842	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2843	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2844	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2845}
2846
2847void hci_request_cancel_all(struct hci_dev *hdev)
2848{
2849	hci_req_sync_cancel(hdev, ENODEV);
2850
2851	cancel_work_sync(&hdev->discov_update);
2852	cancel_work_sync(&hdev->bg_scan_update);
2853	cancel_work_sync(&hdev->scan_update);
2854	cancel_work_sync(&hdev->connectable_update);
2855	cancel_work_sync(&hdev->discoverable_update);
2856	cancel_delayed_work_sync(&hdev->discov_off);
2857	cancel_delayed_work_sync(&hdev->le_scan_disable);
2858	cancel_delayed_work_sync(&hdev->le_scan_restart);
2859
2860	if (hdev->adv_instance_timeout) {
2861		cancel_delayed_work_sync(&hdev->adv_instance_expire);
2862		hdev->adv_instance_timeout = 0;
2863	}
2864}
v5.14.15
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2014 Intel Corporation
   5
   6   This program is free software; you can redistribute it and/or modify
   7   it under the terms of the GNU General Public License version 2 as
   8   published by the Free Software Foundation;
   9
  10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  11   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  12   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  13   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  14   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  15   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  16   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  17   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  18
  19   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  20   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  21   SOFTWARE IS DISCLAIMED.
  22*/
  23
  24#include <linux/sched/signal.h>
  25
  26#include <net/bluetooth/bluetooth.h>
  27#include <net/bluetooth/hci_core.h>
  28#include <net/bluetooth/mgmt.h>
  29
  30#include "smp.h"
  31#include "hci_request.h"
  32#include "msft.h"
  33
  34#define HCI_REQ_DONE	  0
  35#define HCI_REQ_PEND	  1
  36#define HCI_REQ_CANCELED  2
  37
  38void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
  39{
  40	skb_queue_head_init(&req->cmd_q);
  41	req->hdev = hdev;
  42	req->err = 0;
  43}
  44
  45void hci_req_purge(struct hci_request *req)
  46{
  47	skb_queue_purge(&req->cmd_q);
  48}
  49
  50bool hci_req_status_pend(struct hci_dev *hdev)
  51{
  52	return hdev->req_status == HCI_REQ_PEND;
  53}
  54
  55static int req_run(struct hci_request *req, hci_req_complete_t complete,
  56		   hci_req_complete_skb_t complete_skb)
  57{
  58	struct hci_dev *hdev = req->hdev;
  59	struct sk_buff *skb;
  60	unsigned long flags;
  61
  62	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
  63
  64	/* If an error occurred during request building, remove all HCI
  65	 * commands queued on the HCI request queue.
  66	 */
  67	if (req->err) {
  68		skb_queue_purge(&req->cmd_q);
  69		return req->err;
  70	}
  71
  72	/* Do not allow empty requests */
  73	if (skb_queue_empty(&req->cmd_q))
  74		return -ENODATA;
  75
  76	skb = skb_peek_tail(&req->cmd_q);
  77	if (complete) {
  78		bt_cb(skb)->hci.req_complete = complete;
  79	} else if (complete_skb) {
  80		bt_cb(skb)->hci.req_complete_skb = complete_skb;
  81		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
  82	}
  83
  84	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
  85	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
  86	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
  87
  88	queue_work(hdev->workqueue, &hdev->cmd_work);
  89
  90	return 0;
  91}
  92
  93int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
  94{
  95	return req_run(req, complete, NULL);
  96}
  97
  98int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
  99{
 100	return req_run(req, NULL, complete);
 101}
 102
 103static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 104				  struct sk_buff *skb)
 105{
 106	bt_dev_dbg(hdev, "result 0x%2.2x", result);
 107
 108	if (hdev->req_status == HCI_REQ_PEND) {
 109		hdev->req_result = result;
 110		hdev->req_status = HCI_REQ_DONE;
 111		if (skb)
 112			hdev->req_skb = skb_get(skb);
 113		wake_up_interruptible(&hdev->req_wait_q);
 114	}
 115}
 116
 117void hci_req_sync_cancel(struct hci_dev *hdev, int err)
 118{
 119	bt_dev_dbg(hdev, "err 0x%2.2x", err);
 120
 121	if (hdev->req_status == HCI_REQ_PEND) {
 122		hdev->req_result = err;
 123		hdev->req_status = HCI_REQ_CANCELED;
 124		wake_up_interruptible(&hdev->req_wait_q);
 125	}
 126}
 127
 128struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
 129				  const void *param, u8 event, u32 timeout)
 130{
 131	struct hci_request req;
 132	struct sk_buff *skb;
 133	int err = 0;
 134
 135	bt_dev_dbg(hdev, "");
 136
 137	hci_req_init(&req, hdev);
 138
 139	hci_req_add_ev(&req, opcode, plen, param, event);
 140
 141	hdev->req_status = HCI_REQ_PEND;
 142
 143	err = hci_req_run_skb(&req, hci_req_sync_complete);
 144	if (err < 0)
 145		return ERR_PTR(err);
 146
 147	err = wait_event_interruptible_timeout(hdev->req_wait_q,
 148			hdev->req_status != HCI_REQ_PEND, timeout);
 149
 150	if (err == -ERESTARTSYS)
 151		return ERR_PTR(-EINTR);
 152
 153	switch (hdev->req_status) {
 154	case HCI_REQ_DONE:
 155		err = -bt_to_errno(hdev->req_result);
 156		break;
 157
 158	case HCI_REQ_CANCELED:
 159		err = -hdev->req_result;
 160		break;
 161
 162	default:
 163		err = -ETIMEDOUT;
 164		break;
 165	}
 166
 167	hdev->req_status = hdev->req_result = 0;
 168	skb = hdev->req_skb;
 169	hdev->req_skb = NULL;
 170
 171	bt_dev_dbg(hdev, "end: err %d", err);
 172
 173	if (err < 0) {
 174		kfree_skb(skb);
 175		return ERR_PTR(err);
 176	}
 177
 178	if (!skb)
 179		return ERR_PTR(-ENODATA);
 180
 181	return skb;
 182}
 183EXPORT_SYMBOL(__hci_cmd_sync_ev);
 184
 185struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
 186			       const void *param, u32 timeout)
 187{
 188	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
 189}
 190EXPORT_SYMBOL(__hci_cmd_sync);
 191
 192/* Execute request and wait for completion. */
 193int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
 194						     unsigned long opt),
 195		   unsigned long opt, u32 timeout, u8 *hci_status)
 196{
 197	struct hci_request req;
 198	int err = 0;
 199
 200	bt_dev_dbg(hdev, "start");
 201
 202	hci_req_init(&req, hdev);
 203
 204	hdev->req_status = HCI_REQ_PEND;
 205
 206	err = func(&req, opt);
 207	if (err) {
 208		if (hci_status)
 209			*hci_status = HCI_ERROR_UNSPECIFIED;
 210		return err;
 211	}
 212
 213	err = hci_req_run_skb(&req, hci_req_sync_complete);
 214	if (err < 0) {
 215		hdev->req_status = 0;
 216
 217		/* ENODATA means the HCI request command queue is empty.
 218		 * This can happen when a request with conditionals doesn't
 219		 * trigger any commands to be sent. This is normal behavior
 220		 * and should not trigger an error return.
 221		 */
 222		if (err == -ENODATA) {
 223			if (hci_status)
 224				*hci_status = 0;
 225			return 0;
 226		}
 227
 228		if (hci_status)
 229			*hci_status = HCI_ERROR_UNSPECIFIED;
 230
 231		return err;
 232	}
 233
 234	err = wait_event_interruptible_timeout(hdev->req_wait_q,
 235			hdev->req_status != HCI_REQ_PEND, timeout);
 236
 237	if (err == -ERESTARTSYS)
 238		return -EINTR;
 239
 240	switch (hdev->req_status) {
 241	case HCI_REQ_DONE:
 242		err = -bt_to_errno(hdev->req_result);
 243		if (hci_status)
 244			*hci_status = hdev->req_result;
 245		break;
 246
 247	case HCI_REQ_CANCELED:
 248		err = -hdev->req_result;
 249		if (hci_status)
 250			*hci_status = HCI_ERROR_UNSPECIFIED;
 251		break;
 252
 253	default:
 254		err = -ETIMEDOUT;
 255		if (hci_status)
 256			*hci_status = HCI_ERROR_UNSPECIFIED;
 257		break;
 258	}
 259
 260	kfree_skb(hdev->req_skb);
 261	hdev->req_skb = NULL;
 262	hdev->req_status = hdev->req_result = 0;
 263
 264	bt_dev_dbg(hdev, "end: err %d", err);
 265
 266	return err;
 267}
 268
 269int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
 270						  unsigned long opt),
 271		 unsigned long opt, u32 timeout, u8 *hci_status)
 272{
 273	int ret;
 274
 275	/* Serialize all requests */
 276	hci_req_sync_lock(hdev);
 277	/* Check the state after obtaining the lock to protect HCI_UP
 278	 * against any races from hci_dev_do_close when the controller
 279	 * gets removed.
 280	 */
 281	if (test_bit(HCI_UP, &hdev->flags))
 282		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
 283	else
 284		ret = -ENETDOWN;
 285	hci_req_sync_unlock(hdev);
 286
 287	return ret;
 288}
 289
 290struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
 291				const void *param)
 292{
 293	int len = HCI_COMMAND_HDR_SIZE + plen;
 294	struct hci_command_hdr *hdr;
 295	struct sk_buff *skb;
 296
 297	skb = bt_skb_alloc(len, GFP_ATOMIC);
 298	if (!skb)
 299		return NULL;
 300
 301	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
 302	hdr->opcode = cpu_to_le16(opcode);
 303	hdr->plen   = plen;
 304
 305	if (plen)
 306		skb_put_data(skb, param, plen);
 307
 308	bt_dev_dbg(hdev, "skb len %d", skb->len);
 309
 310	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
 311	hci_skb_opcode(skb) = opcode;
 312
 313	return skb;
 314}
 315
 316/* Queue a command to an asynchronous HCI request */
 317void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
 318		    const void *param, u8 event)
 319{
 320	struct hci_dev *hdev = req->hdev;
 321	struct sk_buff *skb;
 322
 323	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
 324
 325	/* If an error occurred during request building, there is no point in
 326	 * queueing the HCI command. We can simply return.
 327	 */
 328	if (req->err)
 329		return;
 330
 331	skb = hci_prepare_cmd(hdev, opcode, plen, param);
 332	if (!skb) {
 333		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
 334			   opcode);
 335		req->err = -ENOMEM;
 336		return;
 337	}
 338
 339	if (skb_queue_empty(&req->cmd_q))
 340		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
 341
 342	bt_cb(skb)->hci.req_event = event;
 343
 344	skb_queue_tail(&req->cmd_q, skb);
 345}
 346
 347void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
 348		 const void *param)
 349{
 350	hci_req_add_ev(req, opcode, plen, param, 0);
 351}
 352
 353void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
 354{
 355	struct hci_dev *hdev = req->hdev;
 356	struct hci_cp_write_page_scan_activity acp;
 357	u8 type;
 358
 359	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 360		return;
 361
 362	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 363		return;
 364
 365	if (enable) {
 366		type = PAGE_SCAN_TYPE_INTERLACED;
 367
 368		/* 160 msec page scan interval */
 369		acp.interval = cpu_to_le16(0x0100);
 370	} else {
 371		type = hdev->def_page_scan_type;
 372		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
 373	}
 374
 375	acp.window = cpu_to_le16(hdev->def_page_scan_window);
 376
 377	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
 378	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
 379		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
 380			    sizeof(acp), &acp);
 381
 382	if (hdev->page_scan_type != type)
 383		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 384}
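    /* The fast-connectable interval above is expressed in units of 0.625 ms:
     * 0x0100 == 256 * 0.625 ms == 160 ms, matching the comment. The default
     * interval, window and scan type come from hdev->def_page_scan_*, and
     * the activity command is only queued when the values differ from what
     * is currently programmed in the controller.
     */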
 385
 386static void start_interleave_scan(struct hci_dev *hdev)
 387{
 388	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
 389	queue_delayed_work(hdev->req_workqueue,
 390			   &hdev->interleave_scan, 0);
 391}
 392
 393static bool is_interleave_scanning(struct hci_dev *hdev)
 394{
 395	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
 396}
 397
 398static void cancel_interleave_scan(struct hci_dev *hdev)
 399{
 400	bt_dev_dbg(hdev, "cancelling interleave scan");
 401
 402	cancel_delayed_work_sync(&hdev->interleave_scan);
 403
 404	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
 405}
 406
 407/* Return true if an interleave scan was started by this function;
 408 * otherwise return false.
 409 */
 410static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
 411{
 412	/* Do interleaved scan only if all of the following are true:
 413	 * - There is at least one ADV monitor
 414	 * - At least one pending LE connection or one device to be scanned for
 415	 * - Monitor offloading is not supported
 416	 * If so, we should alternate between allowlist scan and one without
 417	 * any filters to save power.
 418	 */
 419	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
 420				!(list_empty(&hdev->pend_le_conns) &&
 421				  list_empty(&hdev->pend_le_reports)) &&
 422				hci_get_adv_monitor_offload_ext(hdev) ==
 423				    HCI_ADV_MONITOR_EXT_NONE;
 424	bool is_interleaving = is_interleave_scanning(hdev);
 425
 426	if (use_interleaving && !is_interleaving) {
 427		start_interleave_scan(hdev);
 428		bt_dev_dbg(hdev, "starting interleave scan");
 429		return true;
 430	}
 431
 432	if (!use_interleaving && is_interleaving)
 433		cancel_interleave_scan(hdev);
 434
 435	return false;
 436}
 437
 438/* This function controls the background scanning based on hdev->pend_le_conns
 439 * list. If there are pending LE connections we start the background scanning,
 440 * otherwise we stop it.
 441 *
 442 * This function requires the caller holds hdev->lock.
 443 */
 444static void __hci_update_background_scan(struct hci_request *req)
 445{
 446	struct hci_dev *hdev = req->hdev;
 447
 448	if (!test_bit(HCI_UP, &hdev->flags) ||
 449	    test_bit(HCI_INIT, &hdev->flags) ||
 450	    hci_dev_test_flag(hdev, HCI_SETUP) ||
 451	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 452	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
 453	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 454		return;
 455
 456	/* No point in doing scanning if LE support hasn't been enabled */
 457	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 458		return;
 459
 460	/* If discovery is active don't interfere with it */
 461	if (hdev->discovery.state != DISCOVERY_STOPPED)
 462		return;
 463
 464	/* Reset RSSI and UUID filters when starting background scanning
 465	 * since these filters are meant for service discovery only.
 466	 *
 467	 * The Start Discovery and Start Service Discovery operations
 468	 * ensure that proper values are set for the RSSI threshold and
 469	 * UUID filter list, so it is safe to just reset them here.
 470	 */
 471	hci_discovery_filter_clear(hdev);
 472
 473	bt_dev_dbg(hdev, "ADV monitoring is %s",
 474		   hci_is_adv_monitoring(hdev) ? "on" : "off");
 475
 476	if (list_empty(&hdev->pend_le_conns) &&
 477	    list_empty(&hdev->pend_le_reports) &&
 478	    !hci_is_adv_monitoring(hdev)) {
 479		/* If there are no pending LE connections, no devices to
 480		 * be scanned for and no ADV monitors, we should stop the
 481		 * background scanning.
 482		 */
 483
 484		/* If controller is not scanning we are done. */
 485		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
 486			return;
 487
 488		hci_req_add_le_scan_disable(req, false);
 489
 490		bt_dev_dbg(hdev, "stopping background scanning");
 491	} else {
 492		/* If there is at least one pending LE connection, we should
 493		 * keep the background scan running.
 494		 */
 495
 496		/* If controller is connecting, we should not start scanning
 497		 * since some controllers are not able to scan and connect at
 498		 * the same time.
 499		 */
 500		if (hci_lookup_le_connect(hdev))
 501			return;
 502
 503		/* If controller is currently scanning, we stop it to ensure we
 504		 * don't miss any advertising (due to duplicates filter).
 505		 */
 506		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 507			hci_req_add_le_scan_disable(req, false);
 508
 509		hci_req_add_le_passive_scan(req);
 510		bt_dev_dbg(hdev, "starting background scanning");
 511	}
 512}
 513
 514void __hci_req_update_name(struct hci_request *req)
 515{
 516	struct hci_dev *hdev = req->hdev;
 517	struct hci_cp_write_local_name cp;
 518
 519	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
 520
 521	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 522}
 523
 524#define PNP_INFO_SVCLASS_ID		0x1200
 525
 526static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 527{
 528	u8 *ptr = data, *uuids_start = NULL;
 529	struct bt_uuid *uuid;
 530
 531	if (len < 4)
 532		return ptr;
 533
 534	list_for_each_entry(uuid, &hdev->uuids, list) {
 535		u16 uuid16;
 536
 537		if (uuid->size != 16)
 538			continue;
 539
 540		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
 541		if (uuid16 < 0x1100)
 542			continue;
 543
 544		if (uuid16 == PNP_INFO_SVCLASS_ID)
 545			continue;
 546
 547		if (!uuids_start) {
 548			uuids_start = ptr;
 549			uuids_start[0] = 1;
 550			uuids_start[1] = EIR_UUID16_ALL;
 551			ptr += 2;
 552		}
 553
 554		/* Stop if not enough space to put next UUID */
 555		if ((ptr - data) + sizeof(u16) > len) {
 556			uuids_start[1] = EIR_UUID16_SOME;
 557			break;
 558		}
 559
 560		*ptr++ = (uuid16 & 0x00ff);
 561		*ptr++ = (uuid16 & 0xff00) >> 8;
 562		uuids_start[0] += sizeof(uuid16);
 563	}
 564
 565	return ptr;
 566}
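    /* Sketch of the EIR block produced above, using two made-up 16-bit UUIDs
     * (each EIR structure is <length><type><data>, with length covering the
     * type byte and the data):
     *
     *   [0] 0x05            length: type byte plus two 16-bit UUIDs
     *   [1] EIR_UUID16_ALL  (downgraded to EIR_UUID16_SOME if space ran out)
     *   [2] 0x0a 0x18       0x180a, little-endian
     *   [4] 0x0d 0x18       0x180d, little-endian
     *
     * uuids_start[0] starts at 1 (the type byte) and grows by two for every
     * UUID appended.
     */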
 567
 568static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 569{
 570	u8 *ptr = data, *uuids_start = NULL;
 571	struct bt_uuid *uuid;
 572
 573	if (len < 6)
 574		return ptr;
 575
 576	list_for_each_entry(uuid, &hdev->uuids, list) {
 577		if (uuid->size != 32)
 578			continue;
 579
 580		if (!uuids_start) {
 581			uuids_start = ptr;
 582			uuids_start[0] = 1;
 583			uuids_start[1] = EIR_UUID32_ALL;
 584			ptr += 2;
 585		}
 586
 587		/* Stop if not enough space to put next UUID */
 588		if ((ptr - data) + sizeof(u32) > len) {
 589			uuids_start[1] = EIR_UUID32_SOME;
 590			break;
 591		}
 592
 593		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
 594		ptr += sizeof(u32);
 595		uuids_start[0] += sizeof(u32);
 596	}
 597
 598	return ptr;
 599}
 600
 601static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 602{
 603	u8 *ptr = data, *uuids_start = NULL;
 604	struct bt_uuid *uuid;
 605
 606	if (len < 18)
 607		return ptr;
 608
 609	list_for_each_entry(uuid, &hdev->uuids, list) {
 610		if (uuid->size != 128)
 611			continue;
 612
 613		if (!uuids_start) {
 614			uuids_start = ptr;
 615			uuids_start[0] = 1;
 616			uuids_start[1] = EIR_UUID128_ALL;
 617			ptr += 2;
 618		}
 619
 620		/* Stop if not enough space to put next UUID */
 621		if ((ptr - data) + 16 > len) {
 622			uuids_start[1] = EIR_UUID128_SOME;
 623			break;
 624		}
 625
 626		memcpy(ptr, uuid->uuid, 16);
 627		ptr += 16;
 628		uuids_start[0] += 16;
 629	}
 630
 631	return ptr;
 632}
 633
 634static void create_eir(struct hci_dev *hdev, u8 *data)
 635{
 636	u8 *ptr = data;
 637	size_t name_len;
 638
 639	name_len = strlen(hdev->dev_name);
 640
 641	if (name_len > 0) {
 642		/* EIR Data type */
 643		if (name_len > 48) {
 644			name_len = 48;
 645			ptr[1] = EIR_NAME_SHORT;
 646		} else
 647			ptr[1] = EIR_NAME_COMPLETE;
 648
 649		/* EIR Data length */
 650		ptr[0] = name_len + 1;
 651
 652		memcpy(ptr + 2, hdev->dev_name, name_len);
 653
 654		ptr += (name_len + 2);
 655	}
 656
 657	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
 658		ptr[0] = 2;
 659		ptr[1] = EIR_TX_POWER;
 660		ptr[2] = (u8) hdev->inq_tx_power;
 661
 662		ptr += 3;
 663	}
 664
 665	if (hdev->devid_source > 0) {
 666		ptr[0] = 9;
 667		ptr[1] = EIR_DEVICE_ID;
 668
 669		put_unaligned_le16(hdev->devid_source, ptr + 2);
 670		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
 671		put_unaligned_le16(hdev->devid_product, ptr + 6);
 672		put_unaligned_le16(hdev->devid_version, ptr + 8);
 673
 674		ptr += 10;
 675	}
 676
 677	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 678	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 679	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 680}
 681
 682void __hci_req_update_eir(struct hci_request *req)
 683{
 684	struct hci_dev *hdev = req->hdev;
 685	struct hci_cp_write_eir cp;
 686
 687	if (!hdev_is_powered(hdev))
 688		return;
 689
 690	if (!lmp_ext_inq_capable(hdev))
 691		return;
 692
 693	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
 694		return;
 695
 696	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
 697		return;
 698
 699	memset(&cp, 0, sizeof(cp));
 700
 701	create_eir(hdev, cp.data);
 702
 703	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
 704		return;
 705
 706	memcpy(hdev->eir, cp.data, sizeof(cp.data));
 707
 708	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 709}
 710
 711void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
 712{
 713	struct hci_dev *hdev = req->hdev;
 714
 715	if (hdev->scanning_paused) {
 716		bt_dev_dbg(hdev, "Scanning is paused for suspend");
 717		return;
 718	}
 719
 720	if (hdev->suspended)
 721		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
 722
 723	if (use_ext_scan(hdev)) {
 724		struct hci_cp_le_set_ext_scan_enable cp;
 725
 726		memset(&cp, 0, sizeof(cp));
 727		cp.enable = LE_SCAN_DISABLE;
 728		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
 729			    &cp);
 730	} else {
 731		struct hci_cp_le_set_scan_enable cp;
 732
 733		memset(&cp, 0, sizeof(cp));
 734		cp.enable = LE_SCAN_DISABLE;
 735		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 736	}
 737
 738	/* Disable address resolution */
 739	if (use_ll_privacy(hdev) &&
 740	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 741	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
 742		__u8 enable = 0x00;
 743
 744		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
 745	}
 746}
 747
 748static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
 749				 u8 bdaddr_type)
 750{
 751	struct hci_cp_le_del_from_accept_list cp;
 752
 753	cp.bdaddr_type = bdaddr_type;
 754	bacpy(&cp.bdaddr, bdaddr);
 755
 756	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
 757		   cp.bdaddr_type);
 758	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
 759
 760	if (use_ll_privacy(req->hdev) &&
 761	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
 762		struct smp_irk *irk;
 763
 764		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
 765		if (irk) {
 766			struct hci_cp_le_del_from_resolv_list cp;
 767
 768			cp.bdaddr_type = bdaddr_type;
 769			bacpy(&cp.bdaddr, bdaddr);
 770
 771			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
 772				    sizeof(cp), &cp);
 773		}
 774	}
 775}
 776
 777/* Adds connection to accept list if needed. On error, returns -1. */
 778static int add_to_accept_list(struct hci_request *req,
 779			      struct hci_conn_params *params, u8 *num_entries,
 780			      bool allow_rpa)
 781{
 782	struct hci_cp_le_add_to_accept_list cp;
 783	struct hci_dev *hdev = req->hdev;
 784
 785	/* Already in accept list */
 786	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
 787				   params->addr_type))
 788		return 0;
 789
 790	/* Accept list full: caller falls back to accepting all advertising */
 791	if (*num_entries >= hdev->le_accept_list_size)
 792		return -1;
 793
 794	/* Accept list can not be used with RPAs */
 795	if (!allow_rpa &&
 796	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 797	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
 798		return -1;
 799	}
 800
 801	/* During suspend, only wakeable devices can be in accept list */
 802	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
 803						   params->current_flags))
 804		return 0;
 805
 806	*num_entries += 1;
 807	cp.bdaddr_type = params->addr_type;
 808	bacpy(&cp.bdaddr, &params->addr);
 809
 810	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
 811		   cp.bdaddr_type);
 812	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
 813
 814	if (use_ll_privacy(hdev) &&
 815	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
 816		struct smp_irk *irk;
 817
 818		irk = hci_find_irk_by_addr(hdev, &params->addr,
 819					   params->addr_type);
 820		if (irk) {
 821			struct hci_cp_le_add_to_resolv_list cp;
 822
 823			cp.bdaddr_type = params->addr_type;
 824			bacpy(&cp.bdaddr, &params->addr);
 825			memcpy(cp.peer_irk, irk->val, 16);
 826
 827			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
 828				memcpy(cp.local_irk, hdev->irk, 16);
 829			else
 830				memset(cp.local_irk, 0, 16);
 831
 832			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
 833				    sizeof(cp), &cp);
 834		}
 835	}
 836
 837	return 0;
 838}
 839
 840static u8 update_accept_list(struct hci_request *req)
 841{
 842	struct hci_dev *hdev = req->hdev;
 843	struct hci_conn_params *params;
 844	struct bdaddr_list *b;
 845	u8 num_entries = 0;
 846	bool pend_conn, pend_report;
 847	/* We allow use of the accept list even with RPAs in suspend. In the
 848	 * worst case, we won't be able to wake from devices that use the
 849	 * Privacy 1.2 features. Additionally, once we support Privacy 1.2 and
 850	 * IRK offloading, we can update this to also check for those conditions.
 851	 */
 852	bool allow_rpa = hdev->suspended;
 853
 854	if (use_ll_privacy(hdev) &&
 855	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
 856		allow_rpa = true;
 857
 858	/* Go through the current accept list programmed into the
 859	 * controller one by one and check if that address is still
 860	 * in the list of pending connections or list of devices to
 861	 * report. If not present in either list, then queue the
 862	 * command to remove it from the controller.
 863	 */
 864	list_for_each_entry(b, &hdev->le_accept_list, list) {
 865		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
 866						      &b->bdaddr,
 867						      b->bdaddr_type);
 868		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
 869							&b->bdaddr,
 870							b->bdaddr_type);
 871
 872		/* If the device is not likely to connect or report,
 873		 * remove it from the accept list.
 874		 */
 875		if (!pend_conn && !pend_report) {
 876			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
 877			continue;
 878		}
 879
 880		/* Accept list can not be used with RPAs */
 881		if (!allow_rpa &&
 882		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 883		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
 884			return 0x00;
 885		}
 886
 887		num_entries++;
 888	}
 889
 890	/* Since all no longer valid accept list entries have been
 891	 * removed, walk through the list of pending connections
 892	 * and ensure that any new device gets programmed into
 893	 * the controller.
 894	 *
 895	 * If the list of devices is larger than the number of
 896	 * available accept list entries in the controller, then
 897	 * just abort and return the filter policy value that does
 898	 * not use the accept list.
 899	 */
 900	list_for_each_entry(params, &hdev->pend_le_conns, action) {
 901		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
 902			return 0x00;
 903	}
 904
 905	/* After adding all new pending connections, walk through
 906	 * the list of pending reports and also add these to the
 907	 * accept list if there is still space. Abort if space runs out.
 908	 */
 909	list_for_each_entry(params, &hdev->pend_le_reports, action) {
 910		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
 911			return 0x00;
 912	}
 913
 914	/* Use the allowlist unless the following conditions are all true:
 915	 * - We are not currently suspending
 916	 * - One or more ADV monitors are registered and offloading is not supported
 917	 * - Interleaved scanning is not currently using the allowlist
 918	 */
 919	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
 920	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
 921	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
 922		return 0x00;
 923
 924	/* Select filter policy to use accept list */
 925	return 0x01;
 926}
 927
 928static bool scan_use_rpa(struct hci_dev *hdev)
 929{
 930	return hci_dev_test_flag(hdev, HCI_PRIVACY);
 931}
 932
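    /* Queue the HCI commands needed to start LE scanning with the given
     * parameters, using the extended scan commands when the controller
     * supports them.
     */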
 933static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
 934			       u16 window, u8 own_addr_type, u8 filter_policy,
 935			       bool filter_dup, bool addr_resolv)
 936{
 937	struct hci_dev *hdev = req->hdev;
 938
 939	if (hdev->scanning_paused) {
 940		bt_dev_dbg(hdev, "Scanning is paused for suspend");
 941		return;
 942	}
 943
 944	if (use_ll_privacy(hdev) &&
 945	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 946	    addr_resolv) {
 947		u8 enable = 0x01;
 948
 949		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
 950	}
 951
 952	/* Use extended scanning if the Set Extended Scan Parameters and Set
 953	 * Extended Scan Enable commands are supported
 954	 */
 955	if (use_ext_scan(hdev)) {
 956		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
 957		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
 958		struct hci_cp_le_scan_phy_params *phy_params;
 959		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
 960		u32 plen;
 961
 962		ext_param_cp = (void *)data;
 963		phy_params = (void *)ext_param_cp->data;
 964
 965		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
 966		ext_param_cp->own_addr_type = own_addr_type;
 967		ext_param_cp->filter_policy = filter_policy;
 968
 969		plen = sizeof(*ext_param_cp);
 970
 971		if (scan_1m(hdev) || scan_2m(hdev)) {
 972			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
 973
 974			memset(phy_params, 0, sizeof(*phy_params));
 975			phy_params->type = type;
 976			phy_params->interval = cpu_to_le16(interval);
 977			phy_params->window = cpu_to_le16(window);
 978
 979			plen += sizeof(*phy_params);
 980			phy_params++;
 981		}
 982
 983		if (scan_coded(hdev)) {
 984			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
 985
 986			memset(phy_params, 0, sizeof(*phy_params));
 987			phy_params->type = type;
 988			phy_params->interval = cpu_to_le16(interval);
 989			phy_params->window = cpu_to_le16(window);
 990
 991			plen += sizeof(*phy_params);
 992			phy_params++;
 993		}
 994
 995		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
 996			    plen, ext_param_cp);
 997
 998		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
 999		ext_enable_cp.enable = LE_SCAN_ENABLE;
1000		ext_enable_cp.filter_dup = filter_dup;
1001
1002		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1003			    sizeof(ext_enable_cp), &ext_enable_cp);
1004	} else {
1005		struct hci_cp_le_set_scan_param param_cp;
1006		struct hci_cp_le_set_scan_enable enable_cp;
1007
1008		memset(&param_cp, 0, sizeof(param_cp));
1009		param_cp.type = type;
1010		param_cp.interval = cpu_to_le16(interval);
1011		param_cp.window = cpu_to_le16(window);
1012		param_cp.own_address_type = own_addr_type;
1013		param_cp.filter_policy = filter_policy;
1014		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1015			    &param_cp);
1016
1017		memset(&enable_cp, 0, sizeof(enable_cp));
1018		enable_cp.enable = LE_SCAN_ENABLE;
1019		enable_cp.filter_dup = filter_dup;
1020		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1021			    &enable_cp);
1022	}
1023}
1024
1025/* Returns true if an le connection is in the scanning state */
1026static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1027{
1028	struct hci_conn_hash *h = &hdev->conn_hash;
1029	struct hci_conn  *c;
1030
1031	rcu_read_lock();
1032
1033	list_for_each_entry_rcu(c, &h->list, list) {
1034		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1035		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
1036			rcu_read_unlock();
1037			return true;
1038		}
1039	}
1040
1041	rcu_read_unlock();
1042
1043	return false;
1044}
1045
1046/* Callers must use hci_req_add_le_scan_disable() first to disable
1047 * controller-based address resolution before the resolving list can
1048 * be reconfigured.
1049 */
1050void hci_req_add_le_passive_scan(struct hci_request *req)
1051{
1052	struct hci_dev *hdev = req->hdev;
1053	u8 own_addr_type;
1054	u8 filter_policy;
1055	u16 window, interval;
1056	/* Default is to enable duplicates filter */
1057	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1058	/* Background scanning should run with address resolution */
1059	bool addr_resolv = true;
1060
1061	if (hdev->scanning_paused) {
1062		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1063		return;
1064	}
1065
1066	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
1067	 * during passive scanning. Not using a non-resolvable address
1068	 * here is important so that peer devices using direct
1069	 * advertising with our address will be correctly reported
1070	 * by the controller.
1071	 */
1072	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1073				      &own_addr_type))
1074		return;
1075
1076	if (hdev->enable_advmon_interleave_scan &&
1077	    __hci_update_interleaved_scan(hdev))
1078		return;
1079
1080	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1081	/* Adding or removing entries from the accept list must
1082	 * happen before enabling scanning. The controller does
1083	 * not allow accept list modification while scanning.
1084	 */
1085	filter_policy = update_accept_list(req);
1086
1087	/* When the controller is using random resolvable addresses with
1088	 * LE privacy enabled, controllers that support the Extended
1089	 * Scanner Filter Policies can additionally enable support
1090	 * for handling directed advertising.
1091	 *
1092	 * So instead of using filter policies 0x00 (no accept list)
1093	 * and 0x01 (accept list enabled) use the new filter policies
1094	 * 0x02 (no accept list) and 0x03 (accept list enabled).
1095	 */
1096	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1097	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1098		filter_policy |= 0x02;
1099
1100	if (hdev->suspended) {
1101		window = hdev->le_scan_window_suspend;
1102		interval = hdev->le_scan_int_suspend;
1103
1104		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1105	} else if (hci_is_le_conn_scanning(hdev)) {
1106		window = hdev->le_scan_window_connect;
1107		interval = hdev->le_scan_int_connect;
1108	} else if (hci_is_adv_monitoring(hdev)) {
1109		window = hdev->le_scan_window_adv_monitor;
1110		interval = hdev->le_scan_int_adv_monitor;
1111
1112		/* Disable duplicates filter when scanning for advertisement
1113		 * monitor for the following reasons.
1114		 *
1115		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
1116		 * controllers ignore RSSI_Sampling_Period when the duplicates
1117		 * filter is enabled.
1118		 *
1119		 * For SW pattern filtering, when we're not doing interleaved
1120		 * scanning, it is necessary to disable duplicates filter,
1121		 * otherwise hosts can only receive one advertisement and it's
1122		 * impossible to know if a peer is still in range.
1123		 */
1124		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
1125	} else {
1126		window = hdev->le_scan_window;
1127		interval = hdev->le_scan_interval;
1128	}
1129
1130	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1131		   filter_policy);
1132	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1133			   own_addr_type, filter_policy, filter_dup,
1134			   addr_resolv);
1135}
1136
1137static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1138{
1139	struct adv_info *adv_instance;
1140
1141	/* Instance 0x00 always sets the local name */
1142	if (instance == 0x00)
1143		return true;
1144
1145	adv_instance = hci_find_adv_instance(hdev, instance);
1146	if (!adv_instance)
1147		return false;
1148
1149	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1150	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1151		return true;
1152
1153	return adv_instance->scan_rsp_len ? true : false;
1154}
1155
1156static void hci_req_clear_event_filter(struct hci_request *req)
1157{
1158	struct hci_cp_set_event_filter f;
1159
1160	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1161		return;
1162
1163	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1164		memset(&f, 0, sizeof(f));
1165		f.flt_type = HCI_FLT_CLEAR_ALL;
1166		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1167	}
1168}
1169
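    /* Program connection setup event filters for accept list devices that
     * are allowed to wake the host from suspend, and enable or disable
     * page scan accordingly.
     */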
1170static void hci_req_set_event_filter(struct hci_request *req)
1171{
1172	struct bdaddr_list_with_flags *b;
1173	struct hci_cp_set_event_filter f;
1174	struct hci_dev *hdev = req->hdev;
1175	u8 scan = SCAN_DISABLED;
1176	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1177
1178	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1179		return;
1180
1181	/* Always clear event filter when starting */
1182	hci_req_clear_event_filter(req);
1183
1184	list_for_each_entry(b, &hdev->accept_list, list) {
1185		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1186					b->current_flags))
1187			continue;
1188
1189		memset(&f, 0, sizeof(f));
1190		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1191		f.flt_type = HCI_FLT_CONN_SETUP;
1192		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1193		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1194
1195		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1196		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1197		scan = SCAN_PAGE;
1198	}
1199
1200	if (scan && !scanning) {
1201		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1202		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1203	} else if (!scan && scanning) {
1204		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1205		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1206	}
1207}
1208
1209static void cancel_adv_timeout(struct hci_dev *hdev)
1210{
1211	if (hdev->adv_instance_timeout) {
1212		hdev->adv_instance_timeout = 0;
1213		cancel_delayed_work(&hdev->adv_instance_expire);
1214	}
1215}
1216
1217/* This function requires the caller holds hdev->lock */
1218void __hci_req_pause_adv_instances(struct hci_request *req)
1219{
1220	bt_dev_dbg(req->hdev, "Pausing advertising instances");
1221
1222	/* Call to disable any advertisements active on the controller.
1223	 * This will succeed even if no advertisements are configured.
1224	 */
1225	__hci_req_disable_advertising(req);
1226
1227	/* If we are using software rotation, pause the loop */
1228	if (!ext_adv_capable(req->hdev))
1229		cancel_adv_timeout(req->hdev);
1230}
1231
1232/* This function requires the caller holds hdev->lock */
1233static void __hci_req_resume_adv_instances(struct hci_request *req)
1234{
1235	struct adv_info *adv;
1236
1237	bt_dev_dbg(req->hdev, "Resuming advertising instances");
1238
1239	if (ext_adv_capable(req->hdev)) {
1240		/* Call for each tracked instance to be re-enabled */
1241		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1242			__hci_req_enable_ext_advertising(req,
1243							 adv->instance);
1244		}
1245
1246	} else {
1247		/* Schedule for most recent instance to be restarted and begin
1248		 * the software rotation loop
1249		 */
1250		__hci_req_schedule_adv_instance(req,
1251						req->hdev->cur_adv_instance,
1252						true);
1253	}
1254}
1255
1256/* This function requires the caller holds hdev->lock */
1257int hci_req_resume_adv_instances(struct hci_dev *hdev)
1258{
1259	struct hci_request req;
1260
1261	hci_req_init(&req, hdev);
1262	__hci_req_resume_adv_instances(&req);
1263
1264	return hci_req_run(&req, NULL);
1265}
1266
1267static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1268{
1269	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1270		   status);
1271	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1272	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1273		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1274		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1275		wake_up(&hdev->suspend_wait_q);
1276	}
1277
1278	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1279		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1280		wake_up(&hdev->suspend_wait_q);
1281	}
1282}
1283
1284static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1285					      bool enable)
1286{
1287	struct hci_dev *hdev = req->hdev;
1288
1289	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1290	case HCI_ADV_MONITOR_EXT_MSFT:
1291		msft_req_add_set_filter_enable(req, enable);
1292		break;
1293	default:
1294		return;
1295	}
1296
1297	/* No need to block when enabling since it's on resume path */
1298	if (hdev->suspended && !enable)
1299		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1300}
1301
1302/* Call with hci_dev_lock */
1303void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1304{
1305	int old_state;
1306	struct hci_conn *conn;
1307	struct hci_request req;
1308	u8 page_scan;
1309	int disconnect_counter;
1310
1311	if (next == hdev->suspend_state) {
1312		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1313		goto done;
1314	}
1315
1316	hdev->suspend_state = next;
1317	hci_req_init(&req, hdev);
1318
1319	if (next == BT_SUSPEND_DISCONNECT) {
1320		/* Mark device as suspended */
1321		hdev->suspended = true;
1322
1323		/* Pause discovery if not already stopped */
1324		old_state = hdev->discovery.state;
1325		if (old_state != DISCOVERY_STOPPED) {
1326			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1327			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1328			queue_work(hdev->req_workqueue, &hdev->discov_update);
1329		}
1330
1331		hdev->discovery_paused = true;
1332		hdev->discovery_old_state = old_state;
1333
1334		/* Stop directed advertising */
1335		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1336		if (old_state) {
1337			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1338			cancel_delayed_work(&hdev->discov_off);
1339			queue_delayed_work(hdev->req_workqueue,
1340					   &hdev->discov_off, 0);
1341		}
1342
1343		/* Pause other advertisements */
1344		if (hdev->adv_instance_cnt)
1345			__hci_req_pause_adv_instances(&req);
1346
1347		hdev->advertising_paused = true;
1348		hdev->advertising_old_state = old_state;
1349
1350		/* Disable page scan if enabled */
1351		if (test_bit(HCI_PSCAN, &hdev->flags)) {
1352			page_scan = SCAN_DISABLED;
1353			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1354				    &page_scan);
1355			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1356		}
1357
1358		/* Disable LE passive scan if enabled */
1359		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1360			cancel_interleave_scan(hdev);
1361			hci_req_add_le_scan_disable(&req, false);
1362		}
1363
1364		/* Disable advertisement filters */
1365		hci_req_add_set_adv_filter_enable(&req, false);
1366
1367		/* Prevent disconnects from causing scanning to be re-enabled */
1368		hdev->scanning_paused = true;
1369
1370		/* Run commands before disconnecting */
1371		hci_req_run(&req, suspend_req_complete);
1372
1373		disconnect_counter = 0;
1374		/* Soft disconnect everything (power off) */
1375		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1376			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1377			disconnect_counter++;
1378		}
1379
1380		if (disconnect_counter > 0) {
1381			bt_dev_dbg(hdev,
1382				   "Had %d disconnects. Will wait on them",
1383				   disconnect_counter);
1384			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1385		}
1386	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1387		/* Unpause to take care of updating scanning params */
1388		hdev->scanning_paused = false;
1389		/* Enable event filter for paired devices */
1390		hci_req_set_event_filter(&req);
1391		/* Enable passive scan at lower duty cycle */
1392		__hci_update_background_scan(&req);
1393		/* Pause scan changes again. */
1394		hdev->scanning_paused = true;
1395		hci_req_run(&req, suspend_req_complete);
1396	} else {
1397		hdev->suspended = false;
1398		hdev->scanning_paused = false;
1399
1400		/* Clear any event filters and restore scan state */
1401		hci_req_clear_event_filter(&req);
1402		__hci_req_update_scan(&req);
1403
1404		/* Reset passive/background scanning to normal */
1405		__hci_update_background_scan(&req);
1406		/* Enable all of the advertisement filters */
1407		hci_req_add_set_adv_filter_enable(&req, true);
1408
1409		/* Unpause directed advertising */
1410		hdev->advertising_paused = false;
1411		if (hdev->advertising_old_state) {
1412			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1413				hdev->suspend_tasks);
1414			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1415			queue_work(hdev->req_workqueue,
1416				   &hdev->discoverable_update);
1417			hdev->advertising_old_state = 0;
1418		}
1419
1420		/* Resume other advertisements */
1421		if (hdev->adv_instance_cnt)
1422			__hci_req_resume_adv_instances(&req);
1423
1424		/* Unpause discovery */
1425		hdev->discovery_paused = false;
1426		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1427		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1428			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1429			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1430			queue_work(hdev->req_workqueue, &hdev->discov_update);
1431		}
1432
1433		hci_req_run(&req, suspend_req_complete);
1434	}
1435
1436	hdev->suspend_state = next;
1437
1438done:
1439	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1440	wake_up(&hdev->suspend_wait_q);
1441}
1442
1443static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1444{
1445	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1446}
1447
1448void __hci_req_disable_advertising(struct hci_request *req)
1449{
1450	if (ext_adv_capable(req->hdev)) {
1451		__hci_req_disable_ext_adv_instance(req, 0x00);
1452
1453	} else {
1454		u8 enable = 0x00;
1455
1456		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1457	}
1458}
1459
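    /* Return the advertising flags for the given instance. Instance 0x00
     * derives its flags from the global HCI device settings.
     */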
1460static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1461{
1462	u32 flags;
1463	struct adv_info *adv_instance;
1464
1465	if (instance == 0x00) {
1466		/* Instance 0 always manages the "Tx Power" and "Flags"
1467		 * fields
1468		 */
1469		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1470
1471		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1472		 * corresponds to the "connectable" instance flag.
1473		 */
1474		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1475			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1476
1477		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1478			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1479		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1480			flags |= MGMT_ADV_FLAG_DISCOV;
1481
1482		return flags;
1483	}
1484
1485	adv_instance = hci_find_adv_instance(hdev, instance);
1486
1487	/* Return 0 when we got an invalid instance identifier. */
1488	if (!adv_instance)
1489		return 0;
1490
1491	return adv_instance->flags;
1492}
1493
1494static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1495{
1496	/* If privacy is not enabled don't use RPA */
1497	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1498		return false;
1499
1500	/* If basic privacy mode is enabled use RPA */
1501	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1502		return true;
1503
1504	/* If limited privacy mode is enabled don't use RPA if we're
1505	 * both discoverable and bondable.
1506	 */
1507	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1508	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1509		return false;
1510
1511	/* We're neither bondable nor discoverable in the limited
1512	 * privacy mode, therefore use RPA.
1513	 */
1514	return true;
1515}
1516
1517static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1518{
1519	/* If there is no connection we are OK to advertise. */
1520	if (hci_conn_num(hdev, LE_LINK) == 0)
1521		return true;
1522
1523	/* Check le_states if there is any connection in peripheral role. */
1524	if (hdev->conn_hash.le_num_peripheral > 0) {
1525		/* Peripheral connection state and non connectable mode bit 20.
1526		 */
1527		if (!connectable && !(hdev->le_states[2] & 0x10))
1528			return false;
1529
1530		/* Peripheral connection state and connectable mode bit 38
1531		 * and scannable bit 21.
1532		 */
1533		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1534				    !(hdev->le_states[2] & 0x20)))
1535			return false;
1536	}
1537
1538	/* Check le_states if there is any connection in central role. */
1539	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1540		/* Central connection state and non connectable mode bit 18. */
1541		if (!connectable && !(hdev->le_states[2] & 0x02))
1542			return false;
1543
1544		/* Central connection state and connectable mode bit 35 and
1545		 * scannable 19.
1546		 */
1547		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1548				    !(hdev->le_states[2] & 0x08)))
1549			return false;
1550	}
1551
1552	return true;
1553}
1554
1555void __hci_req_enable_advertising(struct hci_request *req)
1556{
1557	struct hci_dev *hdev = req->hdev;
1558	struct adv_info *adv_instance;
1559	struct hci_cp_le_set_adv_param cp;
1560	u8 own_addr_type, enable = 0x01;
1561	bool connectable;
1562	u16 adv_min_interval, adv_max_interval;
1563	u32 flags;
1564
1565	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1566	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1567
1568	/* If the "connectable" instance flag was not set, then choose between
1569	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1570	 */
1571	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1572		      mgmt_get_connectable(hdev);
1573
1574	if (!is_advertising_allowed(hdev, connectable))
1575		return;
1576
1577	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1578		__hci_req_disable_advertising(req);
1579
1580	/* Clear the HCI_LE_ADV bit temporarily so that the
1581	 * hci_update_random_address knows that it's safe to go ahead
1582	 * and write a new random address. The flag will be set back on
1583	 * as soon as the SET_ADV_ENABLE HCI command completes.
1584	 */
1585	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1586
1587	/* Set require_privacy to true only when non-connectable
1588	 * advertising is used. In that case it is fine to use a
1589	 * non-resolvable private address.
1590	 */
1591	if (hci_update_random_address(req, !connectable,
1592				      adv_use_rpa(hdev, flags),
1593				      &own_addr_type) < 0)
1594		return;
1595
1596	memset(&cp, 0, sizeof(cp));
1597
1598	if (adv_instance) {
1599		adv_min_interval = adv_instance->min_interval;
1600		adv_max_interval = adv_instance->max_interval;
1601	} else {
1602		adv_min_interval = hdev->le_adv_min_interval;
1603		adv_max_interval = hdev->le_adv_max_interval;
1604	}
1605
1606	if (connectable) {
1607		cp.type = LE_ADV_IND;
1608	} else {
1609		if (adv_cur_instance_is_scannable(hdev))
1610			cp.type = LE_ADV_SCAN_IND;
1611		else
1612			cp.type = LE_ADV_NONCONN_IND;
1613
1614		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1615		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1616			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1617			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1618		}
1619	}
1620
1621	cp.min_interval = cpu_to_le16(adv_min_interval);
1622	cp.max_interval = cpu_to_le16(adv_max_interval);
1623	cp.own_address_type = own_addr_type;
1624	cp.channel_map = hdev->le_adv_channel_map;
1625
1626	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1627
1628	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1629}
1630
1631u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1632{
1633	size_t short_len;
1634	size_t complete_len;
1635
1636	/* no space left for name (+ NULL + type + len) */
1637	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1638		return ad_len;
1639
1640	/* use complete name if present and fits */
1641	complete_len = strlen(hdev->dev_name);
1642	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1643		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1644				       hdev->dev_name, complete_len + 1);
1645
1646	/* use short name if present */
1647	short_len = strlen(hdev->short_name);
1648	if (short_len)
1649		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1650				       hdev->short_name, short_len + 1);
1651
1652	/* use shortened full name if present, we already know that name
1653	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1654	 */
1655	if (complete_len) {
1656		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1657
1658		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1659		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1660
1661		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1662				       sizeof(name));
1663	}
1664
1665	return ad_len;
1666}
1667
1668static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1669{
1670	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1671}
1672
1673static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1674{
1675	u8 scan_rsp_len = 0;
1676
1677	if (hdev->appearance)
1678		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1679
1680	return append_local_name(hdev, ptr, scan_rsp_len);
1681}
1682
1683static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1684					u8 *ptr)
1685{
1686	struct adv_info *adv_instance;
1687	u32 instance_flags;
1688	u8 scan_rsp_len = 0;
1689
1690	adv_instance = hci_find_adv_instance(hdev, instance);
1691	if (!adv_instance)
1692		return 0;
1693
1694	instance_flags = adv_instance->flags;
1695
1696	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1697		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1698
1699	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1700	       adv_instance->scan_rsp_len);
1701
1702	scan_rsp_len += adv_instance->scan_rsp_len;
1703
1704	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1705		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1706
1707	return scan_rsp_len;
1708}
1709
1710void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1711{
1712	struct hci_dev *hdev = req->hdev;
1713	u8 len;
1714
1715	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1716		return;
1717
1718	if (ext_adv_capable(hdev)) {
1719		struct {
1720			struct hci_cp_le_set_ext_scan_rsp_data cp;
1721			u8 data[HCI_MAX_EXT_AD_LENGTH];
1722		} pdu;
1723
1724		memset(&pdu, 0, sizeof(pdu));
1725
1726		if (instance)
1727			len = create_instance_scan_rsp_data(hdev, instance,
1728							    pdu.data);
1729		else
1730			len = create_default_scan_rsp_data(hdev, pdu.data);
1731
1732		if (hdev->scan_rsp_data_len == len &&
1733		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
1734			return;
1735
1736		memcpy(hdev->scan_rsp_data, pdu.data, len);
1737		hdev->scan_rsp_data_len = len;
1738
1739		pdu.cp.handle = instance;
1740		pdu.cp.length = len;
1741		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1742		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1743
1744		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1745			    sizeof(pdu.cp) + len, &pdu.cp);
1746	} else {
1747		struct hci_cp_le_set_scan_rsp_data cp;
1748
1749		memset(&cp, 0, sizeof(cp));
1750
1751		if (instance)
1752			len = create_instance_scan_rsp_data(hdev, instance,
1753							    cp.data);
1754		else
1755			len = create_default_scan_rsp_data(hdev, cp.data);
1756
1757		if (hdev->scan_rsp_data_len == len &&
1758		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1759			return;
1760
1761		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1762		hdev->scan_rsp_data_len = len;
1763
1764		cp.length = len;
1765
1766		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1767	}
1768}
1769
1770static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1771{
1772	struct adv_info *adv_instance = NULL;
1773	u8 ad_len = 0, flags = 0;
1774	u32 instance_flags;
1775
1776	/* Return 0 when the current instance identifier is invalid. */
1777	if (instance) {
1778		adv_instance = hci_find_adv_instance(hdev, instance);
1779		if (!adv_instance)
1780			return 0;
1781	}
1782
1783	instance_flags = get_adv_instance_flags(hdev, instance);
1784
1785	/* If the instance already has the flags set, skip adding them
1786	 * again.
1787	 */
1788	if (adv_instance && eir_get_data(adv_instance->adv_data,
1789					 adv_instance->adv_data_len, EIR_FLAGS,
1790					 NULL))
1791		goto skip_flags;
1792
1793	/* The Add Advertising command allows userspace to set both the general
1794	 * and limited discoverable flags.
1795	 */
1796	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1797		flags |= LE_AD_GENERAL;
1798
1799	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1800		flags |= LE_AD_LIMITED;
1801
1802	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1803		flags |= LE_AD_NO_BREDR;
1804
1805	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1806		/* If a discovery flag wasn't provided, simply use the global
1807		 * settings.
1808		 */
1809		if (!flags)
1810			flags |= mgmt_get_adv_discov_flags(hdev);
1811
1812		/* If flags would still be empty, then there is no need to
1813		 * include the "Flags" AD field.
1814		 */
1815		if (flags) {
1816			ptr[0] = 0x02;
1817			ptr[1] = EIR_FLAGS;
1818			ptr[2] = flags;
1819
1820			ad_len += 3;
1821			ptr += 3;
1822		}
1823	}
1824
1825skip_flags:
1826	if (adv_instance) {
1827		memcpy(ptr, adv_instance->adv_data,
1828		       adv_instance->adv_data_len);
1829		ad_len += adv_instance->adv_data_len;
1830		ptr += adv_instance->adv_data_len;
1831	}
1832
1833	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1834		s8 adv_tx_power;
1835
1836		if (ext_adv_capable(hdev)) {
1837			if (adv_instance)
1838				adv_tx_power = adv_instance->tx_power;
1839			else
1840				adv_tx_power = hdev->adv_tx_power;
1841		} else {
1842			adv_tx_power = hdev->adv_tx_power;
1843		}
1844
1845		/* Provide Tx Power only if we can provide a valid value for it */
1846		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1847			ptr[0] = 0x02;
1848			ptr[1] = EIR_TX_POWER;
1849			ptr[2] = (u8)adv_tx_power;
1850
1851			ad_len += 3;
1852			ptr += 3;
1853		}
1854	}
1855
1856	return ad_len;
1857}
1858
1859void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1860{
1861	struct hci_dev *hdev = req->hdev;
1862	u8 len;
1863
1864	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1865		return;
1866
1867	if (ext_adv_capable(hdev)) {
1868		struct {
1869			struct hci_cp_le_set_ext_adv_data cp;
1870			u8 data[HCI_MAX_EXT_AD_LENGTH];
1871		} pdu;
1872
1873		memset(&pdu, 0, sizeof(pdu));
1874
1875		len = create_instance_adv_data(hdev, instance, pdu.data);
1876
1877		/* There's nothing to do if the data hasn't changed */
1878		if (hdev->adv_data_len == len &&
1879		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1880			return;
1881
1882		memcpy(hdev->adv_data, pdu.data, len);
1883		hdev->adv_data_len = len;
1884
1885		pdu.cp.length = len;
1886		pdu.cp.handle = instance;
1887		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1888		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1889
1890		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1891			    sizeof(pdu.cp) + len, &pdu.cp);
1892	} else {
1893		struct hci_cp_le_set_adv_data cp;
1894
1895		memset(&cp, 0, sizeof(cp));
1896
1897		len = create_instance_adv_data(hdev, instance, cp.data);
1898
1899		/* There's nothing to do if the data hasn't changed */
1900		if (hdev->adv_data_len == len &&
1901		    memcmp(cp.data, hdev->adv_data, len) == 0)
1902			return;
1903
1904		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1905		hdev->adv_data_len = len;
1906
1907		cp.length = len;
1908
1909		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1910	}
1911}
1912
1913int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1914{
1915	struct hci_request req;
1916
1917	hci_req_init(&req, hdev);
1918	__hci_req_update_adv_data(&req, instance);
1919
1920	return hci_req_run(&req, NULL);
1921}
1922
1923static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1924					    u16 opcode)
1925{
1926	BT_DBG("%s status %u", hdev->name, status);
1927}
1928
1929void hci_req_disable_address_resolution(struct hci_dev *hdev)
1930{
1931	struct hci_request req;
1932	__u8 enable = 0x00;
1933
1934	if (!use_ll_privacy(hdev) &&
1935	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1936		return;
1937
1938	hci_req_init(&req, hdev);
1939
1940	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1941
1942	hci_req_run(&req, enable_addr_resolution_complete);
1943}
1944
1945static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1946{
1947	bt_dev_dbg(hdev, "status %u", status);
1948}
1949
1950void hci_req_reenable_advertising(struct hci_dev *hdev)
1951{
1952	struct hci_request req;
1953
1954	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1955	    list_empty(&hdev->adv_instances))
1956		return;
1957
1958	hci_req_init(&req, hdev);
1959
1960	if (hdev->cur_adv_instance) {
1961		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1962						true);
1963	} else {
1964		if (ext_adv_capable(hdev)) {
1965			__hci_req_start_ext_adv(&req, 0x00);
1966		} else {
1967			__hci_req_update_adv_data(&req, 0x00);
1968			__hci_req_update_scan_rsp_data(&req, 0x00);
1969			__hci_req_enable_advertising(&req);
1970		}
1971	}
1972
1973	hci_req_run(&req, adv_enable_complete);
1974}
1975
1976static void adv_timeout_expire(struct work_struct *work)
1977{
1978	struct hci_dev *hdev = container_of(work, struct hci_dev,
1979					    adv_instance_expire.work);
1980
1981	struct hci_request req;
1982	u8 instance;
1983
1984	bt_dev_dbg(hdev, "");
1985
1986	hci_dev_lock(hdev);
1987
1988	hdev->adv_instance_timeout = 0;
1989
1990	instance = hdev->cur_adv_instance;
1991	if (instance == 0x00)
1992		goto unlock;
1993
1994	hci_req_init(&req, hdev);
1995
1996	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1997
1998	if (list_empty(&hdev->adv_instances))
1999		__hci_req_disable_advertising(&req);
2000
2001	hci_req_run(&req, NULL);
2002
2003unlock:
2004	hci_dev_unlock(hdev);
2005}
2006
2007static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2008					   unsigned long opt)
2009{
2010	struct hci_dev *hdev = req->hdev;
2011	int ret = 0;
2012
2013	hci_dev_lock(hdev);
2014
2015	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2016		hci_req_add_le_scan_disable(req, false);
2017	hci_req_add_le_passive_scan(req);
2018
2019	switch (hdev->interleave_scan_state) {
2020	case INTERLEAVE_SCAN_ALLOWLIST:
2021		bt_dev_dbg(hdev, "next state: allowlist");
2022		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2023		break;
2024	case INTERLEAVE_SCAN_NO_FILTER:
2025		bt_dev_dbg(hdev, "next state: no filter");
2026		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2027		break;
2028	case INTERLEAVE_SCAN_NONE:
2029		BT_ERR("unexpected error");
2030		ret = -1;
2031	}
2032
2033	hci_dev_unlock(hdev);
2034
2035	return ret;
2036}
2037
2038static void interleave_scan_work(struct work_struct *work)
2039{
2040	struct hci_dev *hdev = container_of(work, struct hci_dev,
2041					    interleave_scan.work);
2042	u8 status;
2043	unsigned long timeout;
2044
2045	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2046		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2047	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2048		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2049	} else {
2050		bt_dev_err(hdev, "unexpected error");
2051		return;
2052	}
2053
2054	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2055		     HCI_CMD_TIMEOUT, &status);
2056
2057	/* Don't continue interleaving if it was canceled */
2058	if (is_interleave_scanning(hdev))
2059		queue_delayed_work(hdev->req_workqueue,
2060				   &hdev->interleave_scan, timeout);
2061}
2062
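    /* Determine the own address type and, if a random address is required,
     * the random address to use for advertising, based on the privacy
     * settings and the optional advertising instance.
     */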
2063int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2064			   bool use_rpa, struct adv_info *adv_instance,
2065			   u8 *own_addr_type, bdaddr_t *rand_addr)
2066{
2067	int err;
2068
2069	bacpy(rand_addr, BDADDR_ANY);
2070
2071	/* If privacy is enabled use a resolvable private address. If
2072	 * current RPA has expired then generate a new one.
2073	 */
2074	if (use_rpa) {
2075		/* If the controller supports LL Privacy, use own address
2076		 * type 0x03.
2077		 */
2078		if (use_ll_privacy(hdev) &&
2079		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2080			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2081		else
2082			*own_addr_type = ADDR_LE_DEV_RANDOM;
2083
2084		if (adv_instance) {
2085			if (adv_rpa_valid(adv_instance))
2086				return 0;
2087		} else {
2088			if (rpa_valid(hdev))
2089				return 0;
2090		}
2091
2092		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2093		if (err < 0) {
2094			bt_dev_err(hdev, "failed to generate new RPA");
2095			return err;
2096		}
2097
2098		bacpy(rand_addr, &hdev->rpa);
2099
2100		return 0;
2101	}
2102
2103	/* In case of required privacy without resolvable private address,
2104	 * use a non-resolvable private address. This is useful for
2105	 * non-connectable advertising.
2106	 */
2107	if (require_privacy) {
2108		bdaddr_t nrpa;
2109
2110		while (true) {
2111			/* The non-resolvable private address is generated
2112			 * from random six bytes with the two most significant
2113			 * bits cleared.
2114			 */
2115			get_random_bytes(&nrpa, 6);
2116			nrpa.b[5] &= 0x3f;
2117
2118			/* The non-resolvable private address shall not be
2119			 * equal to the public address.
2120			 */
2121			if (bacmp(&hdev->bdaddr, &nrpa))
2122				break;
2123		}
2124
2125		*own_addr_type = ADDR_LE_DEV_RANDOM;
2126		bacpy(rand_addr, &nrpa);
2127
2128		return 0;
2129	}
2130
2131	/* No privacy so use a public address. */
2132	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2133
2134	return 0;
2135}
2136
2137void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2138{
2139	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2140}
2141
2142static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2143{
2144	struct hci_dev *hdev = req->hdev;
2145
2146	/* If we're advertising or initiating an LE connection we can't
2147	 * go ahead and change the random address at this time. This is
2148	 * because the eventual initiator address used for the
2149	 * subsequently created connection will be undefined (some
2150	 * controllers use the new address and others the one we had
2151	 * when the operation started).
2152	 *
2153	 * In this kind of scenario skip the update and let the random
2154	 * address be updated at the next cycle.
2155	 */
2156	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2157	    hci_lookup_le_connect(hdev)) {
2158		bt_dev_dbg(hdev, "Deferring random address update");
2159		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2160		return;
2161	}
2162
2163	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2164}
2165
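    /* Queue the LE Set Extended Advertising Parameters command for the
     * given instance, followed by LE Set Advertising Set Random Address
     * when a random address needs to be programmed.
     */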
2166int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2167{
2168	struct hci_cp_le_set_ext_adv_params cp;
2169	struct hci_dev *hdev = req->hdev;
2170	bool connectable;
2171	u32 flags;
2172	bdaddr_t random_addr;
2173	u8 own_addr_type;
2174	int err;
2175	struct adv_info *adv_instance;
2176	bool secondary_adv;
2177
2178	if (instance > 0) {
2179		adv_instance = hci_find_adv_instance(hdev, instance);
2180		if (!adv_instance)
2181			return -EINVAL;
2182	} else {
2183		adv_instance = NULL;
2184	}
2185
2186	flags = get_adv_instance_flags(hdev, instance);
2187
2188	/* If the "connectable" instance flag was not set, then choose between
2189	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2190	 */
2191	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2192		      mgmt_get_connectable(hdev);
2193
2194	if (!is_advertising_allowed(hdev, connectable))
2195		return -EPERM;
2196
2197	/* Set require_privacy to true only when non-connectable
2198	 * advertising is used. In that case it is fine to use a
2199	 * non-resolvable private address.
2200	 */
2201	err = hci_get_random_address(hdev, !connectable,
2202				     adv_use_rpa(hdev, flags), adv_instance,
2203				     &own_addr_type, &random_addr);
2204	if (err < 0)
2205		return err;
2206
2207	memset(&cp, 0, sizeof(cp));
2208
2209	if (adv_instance) {
2210		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2211		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2212		cp.tx_power = adv_instance->tx_power;
2213	} else {
2214		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2215		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2216		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2217	}
2218
2219	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2220
2221	if (connectable) {
2222		if (secondary_adv)
2223			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2224		else
2225			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2226	} else if (adv_instance_is_scannable(hdev, instance) ||
2227		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2228		if (secondary_adv)
2229			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2230		else
2231			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2232	} else {
2233		if (secondary_adv)
2234			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2235		else
2236			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2237	}
2238
2239	cp.own_addr_type = own_addr_type;
2240	cp.channel_map = hdev->le_adv_channel_map;
2241	cp.handle = instance;
2242
2243	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2244		cp.primary_phy = HCI_ADV_PHY_1M;
2245		cp.secondary_phy = HCI_ADV_PHY_2M;
2246	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2247		cp.primary_phy = HCI_ADV_PHY_CODED;
2248		cp.secondary_phy = HCI_ADV_PHY_CODED;
2249	} else {
2250		/* In all other cases use 1M */
2251		cp.primary_phy = HCI_ADV_PHY_1M;
2252		cp.secondary_phy = HCI_ADV_PHY_1M;
2253	}
2254
2255	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2256
2257	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2258	    bacmp(&random_addr, BDADDR_ANY)) {
2259		struct hci_cp_le_set_adv_set_rand_addr cp;
2260
2261		/* Check if random address need to be updated */
2262		if (adv_instance) {
2263			if (!bacmp(&random_addr, &adv_instance->random_addr))
2264				return 0;
2265		} else {
2266			if (!bacmp(&random_addr, &hdev->random_addr))
2267				return 0;
2268			/* Instance 0x00 doesn't have an adv_info, instead it
2269			 * uses hdev->random_addr to track its address so
2270			 * whenever it needs to be updated this also sets the
2271			 * random address, since hdev->random_addr is shared with
2272			 * the scan state machine.
2273			 */
2274			set_random_addr(req, &random_addr);
2275		}
2276
2277		memset(&cp, 0, sizeof(cp));
2278
2279		cp.handle = instance;
2280		bacpy(&cp.bdaddr, &random_addr);
2281
2282		hci_req_add(req,
2283			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2284			    sizeof(cp), &cp);
2285	}
2286
2287	return 0;
2288}
2289
2290int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2291{
2292	struct hci_dev *hdev = req->hdev;
2293	struct hci_cp_le_set_ext_adv_enable *cp;
2294	struct hci_cp_ext_adv_set *adv_set;
2295	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2296	struct adv_info *adv_instance;
2297
2298	if (instance > 0) {
2299		adv_instance = hci_find_adv_instance(hdev, instance);
2300		if (!adv_instance)
2301			return -EINVAL;
2302	} else {
2303		adv_instance = NULL;
2304	}
2305
2306	cp = (void *) data;
2307	adv_set = (void *) cp->data;
2308
2309	memset(cp, 0, sizeof(*cp));
2310
2311	cp->enable = 0x01;
2312	cp->num_of_sets = 0x01;
2313
2314	memset(adv_set, 0, sizeof(*adv_set));
2315
2316	adv_set->handle = instance;
2317
2318	/* Set duration per instance since controller is responsible for
2319	 * scheduling it.
2320	 */
2321	if (adv_instance && adv_instance->duration) {
2322		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2323
2324		/* Time = N * 10 ms */
2325		adv_set->duration = cpu_to_le16(duration / 10);
2326	}
2327
2328	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2329		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2330		    data);
2331
2332	return 0;
2333}
2334
2335int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2336{
2337	struct hci_dev *hdev = req->hdev;
2338	struct hci_cp_le_set_ext_adv_enable *cp;
2339	struct hci_cp_ext_adv_set *adv_set;
2340	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2341	u8 req_size;
2342
2343	/* If request specifies an instance that doesn't exist, fail */
2344	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2345		return -EINVAL;
2346
2347	memset(data, 0, sizeof(data));
2348
2349	cp = (void *)data;
2350	adv_set = (void *)cp->data;
2351
2352	/* Instance 0x00 indicates all advertising instances will be disabled */
2353	cp->num_of_sets = !!instance;
2354	cp->enable = 0x00;
2355
2356	adv_set->handle = instance;
2357
2358	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2359	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2360
2361	return 0;
2362}
2363
2364int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2365{
2366	struct hci_dev *hdev = req->hdev;
2367
2368	/* If request specifies an instance that doesn't exist, fail */
2369	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2370		return -EINVAL;
2371
2372	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2373
2374	return 0;
2375}
2376
2377int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2378{
2379	struct hci_dev *hdev = req->hdev;
2380	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2381	int err;
2382
2383	/* If instance isn't pending, the chip knows about it, and it's safe to
2384	 * disable
2385	 */
2386	if (adv_instance && !adv_instance->pending)
2387		__hci_req_disable_ext_adv_instance(req, instance);
2388
2389	err = __hci_req_setup_ext_adv_instance(req, instance);
2390	if (err < 0)
2391		return err;
2392
2393	__hci_req_update_scan_rsp_data(req, instance);
2394	__hci_req_enable_ext_advertising(req, instance);
2395
2396	return 0;
2397}
2398
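    /* Schedule the given advertising instance: update its remaining
     * lifetime, arm the expiry work for legacy advertising and queue the
     * HCI commands needed to (re)enable the instance.
     */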
2399int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2400				    bool force)
2401{
2402	struct hci_dev *hdev = req->hdev;
2403	struct adv_info *adv_instance = NULL;
2404	u16 timeout;
2405
2406	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2407	    list_empty(&hdev->adv_instances))
2408		return -EPERM;
2409
2410	if (hdev->adv_instance_timeout)
2411		return -EBUSY;
2412
2413	adv_instance = hci_find_adv_instance(hdev, instance);
2414	if (!adv_instance)
2415		return -ENOENT;
2416
2417	/* A zero timeout means unlimited advertising. As long as there is
2418	 * only one instance, duration should be ignored. We still set a timeout
2419	 * in case further instances are being added later on.
2420	 *
2421	 * If the remaining lifetime of the instance is more than the duration
2422	 * then the timeout corresponds to the duration, otherwise it will be
2423	 * reduced to the remaining instance lifetime.
2424	 */
2425	if (adv_instance->timeout == 0 ||
2426	    adv_instance->duration <= adv_instance->remaining_time)
2427		timeout = adv_instance->duration;
2428	else
2429		timeout = adv_instance->remaining_time;
2430
2431	/* The remaining time is being reduced unless the instance is being
2432	 * advertised without time limit.
2433	 */
2434	if (adv_instance->timeout)
2435		adv_instance->remaining_time =
2436				adv_instance->remaining_time - timeout;
2437
2438	/* Only use work for scheduling instances with legacy advertising */
2439	if (!ext_adv_capable(hdev)) {
2440		hdev->adv_instance_timeout = timeout;
2441		queue_delayed_work(hdev->req_workqueue,
2442			   &hdev->adv_instance_expire,
2443			   msecs_to_jiffies(timeout * 1000));
2444	}
2445
2446	/* If we're just re-scheduling the same instance again then do not
2447	 * execute any HCI commands. This happens when a single instance is
2448	 * being advertised.
2449	 */
2450	if (!force && hdev->cur_adv_instance == instance &&
2451	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2452		return 0;
2453
2454	hdev->cur_adv_instance = instance;
2455	if (ext_adv_capable(hdev)) {
2456		__hci_req_start_ext_adv(req, instance);
2457	} else {
2458		__hci_req_update_adv_data(req, instance);
2459		__hci_req_update_scan_rsp_data(req, instance);
2460		__hci_req_enable_advertising(req);
2461	}
2462
2463	return 0;
2464}
2465
2466/* For a single instance:
2467 * - force == true: The instance will be removed even when its remaining
2468 *   lifetime is not zero.
2469 * - force == false: the instance will be deactivated but kept stored unless
2470 *   the remaining lifetime is zero.
2471 *
2472 * For instance == 0x00:
2473 * - force == true: All instances will be removed regardless of their timeout
2474 *   setting.
2475 * - force == false: Only instances that have a timeout will be removed.
2476 */
2477void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2478				struct hci_request *req, u8 instance,
2479				bool force)
2480{
2481	struct adv_info *adv_instance, *n, *next_instance = NULL;
2482	int err;
2483	u8 rem_inst;
2484
2485	/* Cancel any timeout concerning the removed instance(s). */
2486	if (!instance || hdev->cur_adv_instance == instance)
2487		cancel_adv_timeout(hdev);
2488
2489	/* Get the next instance to advertise BEFORE we remove
2490	 * the current one. This can be the same instance again
2491	 * if there is only one instance.
2492	 */
2493	if (instance && hdev->cur_adv_instance == instance)
2494		next_instance = hci_get_next_instance(hdev, instance);
2495
2496	if (instance == 0x00) {
2497		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2498					 list) {
2499			if (!(force || adv_instance->timeout))
2500				continue;
2501
2502			rem_inst = adv_instance->instance;
2503			err = hci_remove_adv_instance(hdev, rem_inst);
2504			if (!err)
2505				mgmt_advertising_removed(sk, hdev, rem_inst);
2506		}
2507	} else {
2508		adv_instance = hci_find_adv_instance(hdev, instance);
2509
2510		if (force || (adv_instance && adv_instance->timeout &&
2511			      !adv_instance->remaining_time)) {
2512			/* Don't advertise a removed instance. */
2513			if (next_instance &&
2514			    next_instance->instance == instance)
2515				next_instance = NULL;
2516
2517			err = hci_remove_adv_instance(hdev, instance);
2518			if (!err)
2519				mgmt_advertising_removed(sk, hdev, instance);
2520		}
2521	}
2522
2523	if (!req || !hdev_is_powered(hdev) ||
2524	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2525		return;
2526
2527	if (next_instance && !ext_adv_capable(hdev))
2528		__hci_req_schedule_adv_instance(req, next_instance->instance,
2529						false);
2530}
2531
2532int hci_update_random_address(struct hci_request *req, bool require_privacy,
2533			      bool use_rpa, u8 *own_addr_type)
2534{
2535	struct hci_dev *hdev = req->hdev;
2536	int err;
2537
2538	/* If privacy is enabled use a resolvable private address. If
2539	 * the current RPA has expired or something other than the
2540	 * current RPA is in use, then generate a new one.
2541	 */
2542	if (use_rpa) {
2543		/* If the controller supports LL Privacy, use own address
2544		 * type 0x03.
2545		 */
2546		if (use_ll_privacy(hdev) &&
2547		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2548			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2549		else
2550			*own_addr_type = ADDR_LE_DEV_RANDOM;
2551
2552		if (rpa_valid(hdev))
2553			return 0;
2554
2555		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2556		if (err < 0) {
2557			bt_dev_err(hdev, "failed to generate new RPA");
2558			return err;
2559		}
2560
2561		set_random_addr(req, &hdev->rpa);
2562
2563		return 0;
2564	}
2565
2566	/* In case of required privacy without resolvable private address,
2567	 * use a non-resolvable private address. This is useful for active
2568	 * scanning and non-connectable advertising.
2569	 */
2570	if (require_privacy) {
2571		bdaddr_t nrpa;
2572
2573		while (true) {
2574			/* The non-resolvable private address is generated
2575			 * from random six bytes with the two most significant
2576			 * bits cleared.
2577			 */
2578			get_random_bytes(&nrpa, 6);
2579			nrpa.b[5] &= 0x3f;
2580
2581			/* The non-resolvable private address shall not be
2582			 * equal to the public address.
2583			 */
2584			if (bacmp(&hdev->bdaddr, &nrpa))
2585				break;
2586		}
2587
2588		*own_addr_type = ADDR_LE_DEV_RANDOM;
2589		set_random_addr(req, &nrpa);
2590		return 0;
2591	}
2592
2593	/* If forcing static address is in use or there is no public
2594	 * address, use the static address as the random address (but skip
2595	 * the HCI command if the current random address is already the
2596	 * static one).
2597	 *
2598	 * In case BR/EDR has been disabled on a dual-mode controller
2599	 * and a static address has been configured, then use that
2600	 * address instead of the public BR/EDR address.
2601	 */
2602	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2603	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2604	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2605	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2606		*own_addr_type = ADDR_LE_DEV_RANDOM;
2607		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2608			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2609				    &hdev->static_addr);
2610		return 0;
2611	}
2612
2613	/* Neither privacy nor static address is being used so use a
2614	 * public address.
2615	 */
2616	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2617
2618	return 0;
2619}
2620
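    /* Return true if any BR/EDR accept list device has no connection or is
     * not in the connected/config state, i.e. page scan is still needed.
     */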
2621static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2622{
2623	struct bdaddr_list *b;
2624
2625	list_for_each_entry(b, &hdev->accept_list, list) {
2626		struct hci_conn *conn;
2627
2628		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2629		if (!conn)
2630			return true;
2631
2632		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2633			return true;
2634	}
2635
2636	return false;
2637}
2638
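    /* Update the BR/EDR page and inquiry scan mode based on the connectable
     * and discoverable settings, skipping the command when the current scan
     * mode already matches.
     */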
2639void __hci_req_update_scan(struct hci_request *req)
2640{
2641	struct hci_dev *hdev = req->hdev;
2642	u8 scan;
2643
2644	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2645		return;
2646
2647	if (!hdev_is_powered(hdev))
2648		return;
2649
2650	if (mgmt_powering_down(hdev))
2651		return;
2652
2653	if (hdev->scanning_paused)
2654		return;
2655
2656	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2657	    disconnected_accept_list_entries(hdev))
2658		scan = SCAN_PAGE;
2659	else
2660		scan = SCAN_DISABLED;
2661
2662	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2663		scan |= SCAN_INQUIRY;
2664
2665	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2666	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2667		return;
2668
2669	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2670}
2671
2672static int update_scan(struct hci_request *req, unsigned long opt)
2673{
2674	hci_dev_lock(req->hdev);
2675	__hci_req_update_scan(req);
2676	hci_dev_unlock(req->hdev);
2677	return 0;
2678}
2679
2680static void scan_update_work(struct work_struct *work)
2681{
2682	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2683
2684	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2685}
2686
2687static int connectable_update(struct hci_request *req, unsigned long opt)
2688{
2689	struct hci_dev *hdev = req->hdev;
2690
2691	hci_dev_lock(hdev);
2692
2693	__hci_req_update_scan(req);
2694
2695	/* If BR/EDR is not enabled and we disable advertising as a
2696	 * by-product of disabling connectable, we need to update the
2697	 * advertising flags.
2698	 */
2699	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2700		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2701
2702	/* Update the advertising parameters if necessary */
2703	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2704	    !list_empty(&hdev->adv_instances)) {
2705		if (ext_adv_capable(hdev))
2706			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2707		else
2708			__hci_req_enable_advertising(req);
2709	}
2710
2711	__hci_update_background_scan(req);
2712
2713	hci_dev_unlock(hdev);
2714
2715	return 0;
2716}
2717
2718static void connectable_update_work(struct work_struct *work)
2719{
2720	struct hci_dev *hdev = container_of(work, struct hci_dev,
2721					    connectable_update);
2722	u8 status;
2723
2724	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2725	mgmt_set_connectable_complete(hdev, status);
2726}
2727
2728static u8 get_service_classes(struct hci_dev *hdev)
2729{
2730	struct bt_uuid *uuid;
2731	u8 val = 0;
2732
2733	list_for_each_entry(uuid, &hdev->uuids, list)
2734		val |= uuid->svc_hint;
2735
2736	return val;
2737}
2738
2739void __hci_req_update_class(struct hci_request *req)
2740{
2741	struct hci_dev *hdev = req->hdev;
2742	u8 cod[3];
2743
2744	bt_dev_dbg(hdev, "");
2745
2746	if (!hdev_is_powered(hdev))
2747		return;
2748
2749	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2750		return;
2751
2752	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2753		return;
2754
2755	cod[0] = hdev->minor_class;
2756	cod[1] = hdev->major_class;
2757	cod[2] = get_service_classes(hdev);
2758
2759	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2760		cod[1] |= 0x20;
2761
2762	if (memcmp(cod, hdev->dev_class, 3) == 0)
2763		return;
2764
2765	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2766}
2767
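/* Queue a Write Current IAC LAP command: in limited discoverable mode
 * both the LIAC (0x9e8b00) and the GIAC (0x9e8b33) are used, otherwise
 * only the GIAC. The LAPs are encoded little-endian below.
 */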
2768static void write_iac(struct hci_request *req)
2769{
2770	struct hci_dev *hdev = req->hdev;
2771	struct hci_cp_write_current_iac_lap cp;
2772
2773	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2774		return;
2775
2776	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2777		/* Limited discoverable mode */
2778		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2779		cp.iac_lap[0] = 0x00;	/* LIAC */
2780		cp.iac_lap[1] = 0x8b;
2781		cp.iac_lap[2] = 0x9e;
2782		cp.iac_lap[3] = 0x33;	/* GIAC */
2783		cp.iac_lap[4] = 0x8b;
2784		cp.iac_lap[5] = 0x9e;
2785	} else {
2786		/* General discoverable mode */
2787		cp.num_iac = 1;
2788		cp.iac_lap[0] = 0x33;	/* GIAC */
2789		cp.iac_lap[1] = 0x8b;
2790		cp.iac_lap[2] = 0x9e;
2791	}
2792
2793	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2794		    (cp.num_iac * 3) + 1, &cp);
2795}
2796
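/* Re-sync IAC, scan mode, class of device and advertising after the
 * discoverable setting has changed.
 */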
2797static int discoverable_update(struct hci_request *req, unsigned long opt)
2798{
2799	struct hci_dev *hdev = req->hdev;
2800
2801	hci_dev_lock(hdev);
2802
2803	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2804		write_iac(req);
2805		__hci_req_update_scan(req);
2806		__hci_req_update_class(req);
2807	}
2808
2809	/* Advertising instances don't use the global discoverable setting, so
2810	 * only update AD if advertising was enabled using Set Advertising.
2811	 */
2812	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2813		__hci_req_update_adv_data(req, 0x00);
2814
2815		/* Discoverable mode affects the local advertising
2816		 * address in limited privacy mode.
2817		 */
2818		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2819			if (ext_adv_capable(hdev))
2820				__hci_req_start_ext_adv(req, 0x00);
2821			else
2822				__hci_req_enable_advertising(req);
2823		}
2824	}
2825
2826	hci_dev_unlock(hdev);
2827
2828	return 0;
2829}
2830
2831static void discoverable_update_work(struct work_struct *work)
2832{
2833	struct hci_dev *hdev = container_of(work, struct hci_dev,
2834					    discoverable_update);
2835	u8 status;
2836
2837	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2838	mgmt_set_discoverable_complete(hdev, status);
2839}
2840
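/* Queue the command that tears down @conn based on its current state:
 * Disconnect (or Disconnect Physical Link for AMP) for established
 * links, Create Connection Cancel for outgoing connection attempts,
 * and Reject Connection Request for incoming ones.
 */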
2841void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2842		      u8 reason)
2843{
2844	switch (conn->state) {
2845	case BT_CONNECTED:
2846	case BT_CONFIG:
2847		if (conn->type == AMP_LINK) {
2848			struct hci_cp_disconn_phy_link cp;
2849
2850			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2851			cp.reason = reason;
2852			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2853				    &cp);
2854		} else {
2855			struct hci_cp_disconnect dc;
2856
2857			dc.handle = cpu_to_le16(conn->handle);
2858			dc.reason = reason;
2859			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2860		}
2861
2862		conn->state = BT_DISCONN;
2863
2864		break;
2865	case BT_CONNECT:
2866		if (conn->type == LE_LINK) {
2867			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2868				break;
2869			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2870				    0, NULL);
2871		} else if (conn->type == ACL_LINK) {
2872			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2873				break;
2874			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2875				    6, &conn->dst);
2876		}
2877		break;
2878	case BT_CONNECT2:
2879		if (conn->type == ACL_LINK) {
2880			struct hci_cp_reject_conn_req rej;
2881
2882			bacpy(&rej.bdaddr, &conn->dst);
2883			rej.reason = reason;
2884
2885			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2886				    sizeof(rej), &rej);
2887		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2888			struct hci_cp_reject_sync_conn_req rej;
2889
2890			bacpy(&rej.bdaddr, &conn->dst);
2891
2892			/* SCO rejection has its own limited set of
2893			 * allowed error values (0x0D-0x0F) which isn't
2894			 * compatible with most values passed to this
2895			 * function. To be safe hard-code one of the
2896			 * values that's suitable for SCO.
2897			 */
2898			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2899
2900			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2901				    sizeof(rej), &rej);
2902		}
2903		break;
2904	default:
2905		conn->state = BT_CLOSED;
2906		break;
2907	}
2908}
2909
2910static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2911{
2912	if (status)
2913		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2914}
2915
2916int hci_abort_conn(struct hci_conn *conn, u8 reason)
2917{
2918	struct hci_request req;
2919	int err;
2920
2921	hci_req_init(&req, conn->hdev);
2922
2923	__hci_abort_conn(&req, conn, reason);
2924
2925	err = hci_req_run(&req, abort_conn_complete);
2926	if (err && err != -ENODATA) {
2927		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2928		return err;
2929	}
2930
2931	return 0;
2932}
2933
2934static int update_bg_scan(struct hci_request *req, unsigned long opt)
2935{
2936	hci_dev_lock(req->hdev);
2937	__hci_update_background_scan(req);
2938	hci_dev_unlock(req->hdev);
2939	return 0;
2940}
2941
2942static void bg_scan_update(struct work_struct *work)
2943{
2944	struct hci_dev *hdev = container_of(work, struct hci_dev,
2945					    bg_scan_update);
2946	struct hci_conn *conn;
2947	u8 status;
2948	int err;
2949
2950	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2951	if (!err)
2952		return;
2953
2954	hci_dev_lock(hdev);
2955
2956	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2957	if (conn)
2958		hci_le_conn_failed(conn, status);
2959
2960	hci_dev_unlock(hdev);
2961}
2962
2963static int le_scan_disable(struct hci_request *req, unsigned long opt)
2964{
2965	hci_req_add_le_scan_disable(req, false);
2966	return 0;
2967}
2968
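/* Flush the inquiry cache and queue an Inquiry command, using the LIAC
 * for limited discovery and the GIAC otherwise. @opt carries the
 * inquiry length; nothing is done if an inquiry is already active.
 */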
2969static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2970{
2971	u8 length = opt;
2972	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2973	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2974	struct hci_cp_inquiry cp;
2975
2976	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2977		return 0;
2978
2979	bt_dev_dbg(req->hdev, "");
2980
2981	hci_dev_lock(req->hdev);
2982	hci_inquiry_cache_flush(req->hdev);
2983	hci_dev_unlock(req->hdev);
2984
2985	memset(&cp, 0, sizeof(cp));
2986
2987	if (req->hdev->discovery.limited)
2988		memcpy(&cp.lap, liac, sizeof(cp.lap));
2989	else
2990		memcpy(&cp.lap, giac, sizeof(cp.lap));
2991
2992	cp.length = length;
2993
2994	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2995
2996	return 0;
2997}
2998
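/* Runs when the LE scanning phase of discovery times out: disable the
 * scan and either stop discovery or, for interleaved discovery, hand
 * over to BR/EDR inquiry.
 */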
2999static void le_scan_disable_work(struct work_struct *work)
3000{
3001	struct hci_dev *hdev = container_of(work, struct hci_dev,
3002					    le_scan_disable.work);
3003	u8 status;
3004
3005	bt_dev_dbg(hdev, "");
3006
3007	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3008		return;
3009
3010	cancel_delayed_work(&hdev->le_scan_restart);
3011
3012	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3013	if (status) {
3014		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3015			   status);
3016		return;
3017	}
3018
3019	hdev->discovery.scan_start = 0;
3020
3021	/* If we were running an LE-only scan, change the discovery state.
3022	 * If we were running LE and BR/EDR inquiry simultaneously and the
3023	 * BR/EDR inquiry has already finished, stop discovery; otherwise
3024	 * the BR/EDR inquiry will stop discovery when it finishes.
3025	 * If a remote device name is still being resolved, do not change
3026	 * the discovery state.
3027	 */
3028
3029	if (hdev->discovery.type == DISCOV_TYPE_LE)
3030		goto discov_stopped;
3031
3032	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3033		return;
3034
3035	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3036		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3037		    hdev->discovery.state != DISCOVERY_RESOLVING)
3038			goto discov_stopped;
3039
3040		return;
3041	}
3042
3043	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3044		     HCI_CMD_TIMEOUT, &status);
3045	if (status) {
3046		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3047		goto discov_stopped;
3048	}
3049
3050	return;
3051
3052discov_stopped:
3053	hci_dev_lock(hdev);
3054	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3055	hci_dev_unlock(hdev);
3056}
3057
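/* Restart LE scanning by disabling and immediately re-enabling it with
 * the previously programmed parameters, so that controllers with a
 * strict duplicate filter report already known devices again.
 */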
3058static int le_scan_restart(struct hci_request *req, unsigned long opt)
3059{
3060	struct hci_dev *hdev = req->hdev;
3061
3062	/* If controller is not scanning we are done. */
3063	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3064		return 0;
3065
3066	if (hdev->scanning_paused) {
3067		bt_dev_dbg(hdev, "Scanning is paused for suspend");
3068		return 0;
3069	}
3070
3071	hci_req_add_le_scan_disable(req, false);
3072
3073	if (use_ext_scan(hdev)) {
3074		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3075
3076		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3077		ext_enable_cp.enable = LE_SCAN_ENABLE;
3078		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3079
3080		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3081			    sizeof(ext_enable_cp), &ext_enable_cp);
3082	} else {
3083		struct hci_cp_le_set_scan_enable cp;
3084
3085		memset(&cp, 0, sizeof(cp));
3086		cp.enable = LE_SCAN_ENABLE;
3087		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3088		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3089	}
3090
3091	return 0;
3092}
3093
3094static void le_scan_restart_work(struct work_struct *work)
3095{
3096	struct hci_dev *hdev = container_of(work, struct hci_dev,
3097					    le_scan_restart.work);
3098	unsigned long timeout, duration, scan_start, now;
3099	u8 status;
3100
3101	bt_dev_dbg(hdev, "");
3102
3103	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3104	if (status) {
3105		bt_dev_err(hdev, "failed to restart LE scan: status %d",
3106			   status);
3107		return;
3108	}
3109
3110	hci_dev_lock(hdev);
3111
3112	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3113	    !hdev->discovery.scan_start)
3114		goto unlock;
3115
3116	/* When the scan was started, the le_scan_disable work was queued
3117	 * to run 'duration' after scan_start. That work has been canceled
3118	 * as part of the restart, so queue it again with the remaining
3119	 * timeout to make sure the scan does not run indefinitely.
3120	 */
3121	duration = hdev->discovery.scan_duration;
3122	scan_start = hdev->discovery.scan_start;
3123	now = jiffies;
3124	if (now - scan_start <= duration) {
3125		int elapsed;
3126
3127		if (now >= scan_start)
3128			elapsed = now - scan_start;
3129		else
3130			elapsed = ULONG_MAX - scan_start + now;
3131
3132		timeout = duration - elapsed;
3133	} else {
3134		timeout = 0;
3135	}
3136
3137	queue_delayed_work(hdev->req_workqueue,
3138			   &hdev->le_scan_disable, timeout);
3139
3140unlock:
3141	hci_dev_unlock(hdev);
3142}
3143
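/* Set up and enable an active LE scan for device discovery. Any
 * running passive (background) scan is stopped first, and a private
 * address is used when possible. The scan interval comes from @opt,
 * the window from hdev->le_scan_window_discovery.
 */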
3144static int active_scan(struct hci_request *req, unsigned long opt)
3145{
3146	uint16_t interval = opt;
3147	struct hci_dev *hdev = req->hdev;
3148	u8 own_addr_type;
3149	/* Accept list is not used for discovery */
3150	u8 filter_policy = 0x00;
3151	/* Default is to enable duplicates filter */
3152	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3153	/* Discovery doesn't require controller address resolution */
3154	bool addr_resolv = false;
3155	int err;
3156
3157	bt_dev_dbg(hdev, "");
3158
3159	/* If controller is scanning, it means the background scanning is
3160	 * running. Thus, we should temporarily stop it in order to set the
3161	 * discovery scanning parameters.
3162	 */
3163	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3164		hci_req_add_le_scan_disable(req, false);
3165		cancel_interleave_scan(hdev);
3166	}
3167
3168	/* All active scans will be done with either a resolvable private
3169	 * address (when privacy feature has been enabled) or non-resolvable
3170	 * private address.
3171	 */
3172	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3173					&own_addr_type);
3174	if (err < 0)
3175		own_addr_type = ADDR_LE_DEV_PUBLIC;
3176
3177	if (hci_is_adv_monitoring(hdev)) {
3178		/* Duplicate filter should be disabled when some advertisement
3179		 * monitor is activated, otherwise AdvMon can only receive one
3180		 * advertisement for one peer(*) during active scanning, and
3181		 * might report loss to these peers.
3182		 *
3183		 * Note that different controllers have different meanings of
3184		 * |duplicate|. Some of them consider packets with the same
3185		 * address as duplicate, and others consider packets with the
3186		 * same address and the same RSSI as duplicate. In the latter
3187		 * case disabling the duplicate filter is not strictly needed,
3188		 * but since active scanning typically runs only for a short
3189		 * period of time, the power impact is negligible.
3190		 */
3191		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3192	}
3193
3194	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3195			   hdev->le_scan_window_discovery, own_addr_type,
3196			   filter_policy, filter_dup, addr_resolv);
3197	return 0;
3198}
3199
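/* Start an active LE scan and a BR/EDR inquiry within a single
 * request, for controllers that can run both simultaneously.
 */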
3200static int interleaved_discov(struct hci_request *req, unsigned long opt)
3201{
3202	int err;
3203
3204	bt_dev_dbg(req->hdev, "");
3205
3206	err = active_scan(req, opt);
3207	if (err)
3208		return err;
3209
3210	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3211}
3212
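/* Start the discovery procedure selected by hdev->discovery.type and,
 * when LE scanning is involved, arm the le_scan_disable timeout that
 * ends the scanning phase.
 */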
3213static void start_discovery(struct hci_dev *hdev, u8 *status)
3214{
3215	unsigned long timeout;
3216
3217	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3218
3219	switch (hdev->discovery.type) {
3220	case DISCOV_TYPE_BREDR:
3221		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3222			hci_req_sync(hdev, bredr_inquiry,
3223				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3224				     status);
3225		return;
3226	case DISCOV_TYPE_INTERLEAVED:
3227		/* When running simultaneous discovery, the LE scanning time
3228		 * should occupy the whole discovery time since BR/EDR inquiry
3229		 * and LE scanning are scheduled by the controller.
3230		 *
3231		 * For interleaved discovery, in comparison, BR/EDR inquiry
3232		 * and LE scanning are done sequentially with separate
3233		 * timeouts.
3234		 */
3235		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3236			     &hdev->quirks)) {
3237			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3238			/* During simultaneous discovery, we double the LE scan
3239			 * interval. We must leave some time for the controller
3240			 * to do BR/EDR inquiry.
3241			 */
3242			hci_req_sync(hdev, interleaved_discov,
3243				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3244				     status);
3245			break;
3246		}
3247
3248		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3249		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3250			     HCI_CMD_TIMEOUT, status);
3251		break;
3252	case DISCOV_TYPE_LE:
3253		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3254		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3255			     HCI_CMD_TIMEOUT, status);
3256		break;
3257	default:
3258		*status = HCI_ERROR_UNSPECIFIED;
3259		return;
3260	}
3261
3262	if (*status)
3263		return;
3264
3265	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3266
3267	/* When service discovery is used and the controller has a
3268	 * strict duplicate filter, it is important to remember the
3269	 * start and duration of the scan. This is required for
3270	 * restarting scanning during the discovery phase.
3271	 */
3272	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3273		     hdev->discovery.result_filtering) {
3274		hdev->discovery.scan_start = jiffies;
3275		hdev->discovery.scan_duration = timeout;
3276	}
3277
3278	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3279			   timeout);
3280}
3281
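/* Queue the commands needed to stop the current discovery activity:
 * Inquiry Cancel, LE scan disable and, for BR/EDR name resolution,
 * Remote Name Request Cancel. Returns true if there was anything to
 * stop.
 */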
3282bool hci_req_stop_discovery(struct hci_request *req)
3283{
3284	struct hci_dev *hdev = req->hdev;
3285	struct discovery_state *d = &hdev->discovery;
3286	struct hci_cp_remote_name_req_cancel cp;
3287	struct inquiry_entry *e;
3288	bool ret = false;
3289
3290	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3291
3292	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3293		if (test_bit(HCI_INQUIRY, &hdev->flags))
3294			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3295
3296		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3297			cancel_delayed_work(&hdev->le_scan_disable);
3298			cancel_delayed_work(&hdev->le_scan_restart);
3299			hci_req_add_le_scan_disable(req, false);
3300		}
3301
3302		ret = true;
3303	} else {
3304		/* Passive scanning */
3305		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3306			hci_req_add_le_scan_disable(req, false);
3307			ret = true;
3308		}
3309	}
3310
3311	/* No further actions needed for LE-only discovery */
3312	if (d->type == DISCOV_TYPE_LE)
3313		return ret;
3314
3315	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3316		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3317						     NAME_PENDING);
3318		if (!e)
3319			return ret;
3320
3321		bacpy(&cp.bdaddr, &e->data.bdaddr);
3322		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3323			    &cp);
3324		ret = true;
3325	}
3326
3327	return ret;
3328}
3329
3330static int stop_discovery(struct hci_request *req, unsigned long opt)
3331{
3332	hci_dev_lock(req->hdev);
3333	hci_req_stop_discovery(req);
3334	hci_dev_unlock(req->hdev);
3335
3336	return 0;
3337}
3338
3339static void discov_update(struct work_struct *work)
3340{
3341	struct hci_dev *hdev = container_of(work, struct hci_dev,
3342					    discov_update);
3343	u8 status = 0;
3344
3345	switch (hdev->discovery.state) {
3346	case DISCOVERY_STARTING:
3347		start_discovery(hdev, &status);
3348		mgmt_start_discovery_complete(hdev, status);
3349		if (status)
3350			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3351		else
3352			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3353		break;
3354	case DISCOVERY_STOPPING:
3355		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3356		mgmt_stop_discovery_complete(hdev, status);
3357		if (!status)
3358			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3359		break;
3360	case DISCOVERY_STOPPED:
3361	default:
3362		return;
3363	}
3364}
3365
3366static void discov_off(struct work_struct *work)
3367{
3368	struct hci_dev *hdev = container_of(work, struct hci_dev,
3369					    discov_off.work);
3370
3371	bt_dev_dbg(hdev, "");
3372
3373	hci_dev_lock(hdev);
3374
3375	/* When the discoverable timeout triggers, make sure the limited
3376	 * discoverable flag is cleared. Even when the timeout was set up
3377	 * for general discoverable mode, it is safe to clear the flag
3378	 * unconditionally.
3379	 */
3380	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3381	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3382	hdev->discov_timeout = 0;
3383
3384	hci_dev_unlock(hdev);
3385
3386	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3387	mgmt_new_settings(hdev);
3388}
3389
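/* Bring the controller configuration (SSP, LE host support,
 * advertising, link security, scan mode, class, name and EIR) in sync
 * with the current host settings once the controller is powered on.
 */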
3390static int powered_update_hci(struct hci_request *req, unsigned long opt)
3391{
3392	struct hci_dev *hdev = req->hdev;
3393	u8 link_sec;
3394
3395	hci_dev_lock(hdev);
3396
3397	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3398	    !lmp_host_ssp_capable(hdev)) {
3399		u8 mode = 0x01;
3400
3401		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3402
3403		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3404			u8 support = 0x01;
3405
3406			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3407				    sizeof(support), &support);
3408		}
3409	}
3410
3411	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3412	    lmp_bredr_capable(hdev)) {
3413		struct hci_cp_write_le_host_supported cp;
3414
3415		cp.le = 0x01;
3416		cp.simul = 0x00;
3417
3418		/* Check first if we already have the right
3419		 * host state (host features set)
3420		 */
3421		if (cp.le != lmp_host_le_capable(hdev) ||
3422		    cp.simul != lmp_host_le_br_capable(hdev))
3423			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3424				    sizeof(cp), &cp);
3425	}
3426
3427	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3428		/* Make sure the controller has a good default for
3429		 * advertising data. This also applies to the case
3430		 * where BR/EDR was toggled during the AUTO_OFF phase.
3431		 */
3432		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3433		    list_empty(&hdev->adv_instances)) {
3434			int err;
3435
3436			if (ext_adv_capable(hdev)) {
3437				err = __hci_req_setup_ext_adv_instance(req,
3438								       0x00);
3439				if (!err)
3440					__hci_req_update_scan_rsp_data(req,
3441								       0x00);
3442			} else {
3443				err = 0;
3444				__hci_req_update_adv_data(req, 0x00);
3445				__hci_req_update_scan_rsp_data(req, 0x00);
3446			}
3447
3448			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3449				if (!ext_adv_capable(hdev))
3450					__hci_req_enable_advertising(req);
3451				else if (!err)
3452					__hci_req_enable_ext_advertising(req,
3453									 0x00);
3454			}
3455		} else if (!list_empty(&hdev->adv_instances)) {
3456			struct adv_info *adv_instance;
3457
3458			adv_instance = list_first_entry(&hdev->adv_instances,
3459							struct adv_info, list);
3460			__hci_req_schedule_adv_instance(req,
3461							adv_instance->instance,
3462							true);
3463		}
3464	}
3465
3466	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3467	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3468		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3469			    sizeof(link_sec), &link_sec);
3470
3471	if (lmp_bredr_capable(hdev)) {
3472		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3473			__hci_req_write_fast_connectable(req, true);
3474		else
3475			__hci_req_write_fast_connectable(req, false);
3476		__hci_req_update_scan(req);
3477		__hci_req_update_class(req);
3478		__hci_req_update_name(req);
3479		__hci_req_update_eir(req);
3480	}
3481
3482	hci_dev_unlock(hdev);
3483	return 0;
3484}
3485
3486int __hci_req_hci_power_on(struct hci_dev *hdev)
3487{
3488	/* Register the available SMP channels (BR/EDR and LE) only when
3489	 * successfully powering on the controller. This late
3490	 * registration is required so that LE SMP can clearly decide if
3491	 * the public address or static address is used.
3492	 */
3493	smp_register(hdev);
3494
3495	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3496			      NULL);
3497}
3498
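/* Initialize the work items used by the request machinery; their
 * counterpart is hci_request_cancel_all() below.
 */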
3499void hci_request_setup(struct hci_dev *hdev)
3500{
3501	INIT_WORK(&hdev->discov_update, discov_update);
3502	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3503	INIT_WORK(&hdev->scan_update, scan_update_work);
3504	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3505	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3506	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3507	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3508	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3509	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3510	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3511}
3512
3513void hci_request_cancel_all(struct hci_dev *hdev)
3514{
3515	hci_req_sync_cancel(hdev, ENODEV);
3516
3517	cancel_work_sync(&hdev->discov_update);
3518	cancel_work_sync(&hdev->bg_scan_update);
3519	cancel_work_sync(&hdev->scan_update);
3520	cancel_work_sync(&hdev->connectable_update);
3521	cancel_work_sync(&hdev->discoverable_update);
3522	cancel_delayed_work_sync(&hdev->discov_off);
3523	cancel_delayed_work_sync(&hdev->le_scan_disable);
3524	cancel_delayed_work_sync(&hdev->le_scan_restart);
3525
3526	if (hdev->adv_instance_timeout) {
3527		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3528		hdev->adv_instance_timeout = 0;
3529	}
3530
3531	cancel_interleave_scan(hdev);
3532}