Linux Audio

Check our new training course

Yocto distribution development and maintenance

Need a Yocto distribution for your embedded project?
Loading...
v5.4
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2010  Nokia Corporation
   5   Copyright (C) 2011-2012 Intel Corporation
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI Management interface */
  26
  27#include <linux/module.h>
  28#include <asm/unaligned.h>
  29
  30#include <net/bluetooth/bluetooth.h>
  31#include <net/bluetooth/hci_core.h>
  32#include <net/bluetooth/hci_sock.h>
  33#include <net/bluetooth/l2cap.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "hci_request.h"
  37#include "smp.h"
  38#include "mgmt_util.h"
 
 
  39
/* Version/revision of the mgmt interface itself, reported to user space
 * by the Read Management Version Information command (see read_version()).
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	14
  42
/* Opcodes that a trusted mgmt socket is allowed to issue; this list is
 * reported verbatim by MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
};
 110
/* Events that may be delivered to a trusted mgmt socket; reported by
 * MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
 148
/* Read-only subset of opcodes permitted for untrusted (non-privileged)
 * mgmt sockets; see the HCI_SOCK_TRUSTED checks in read_commands().
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
 157
/* Events that untrusted mgmt sockets are allowed to receive. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
 171
/* Two second timeout, expressed in jiffies. NOTE(review): the user of
 * this cache timeout is outside this chunk — confirm against the rest
 * of the file before changing.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key value, used for comparisons against unset keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
 176
 177/* HCI to MGMT error code conversion table */
 178static u8 mgmt_status_table[] = {
 179	MGMT_STATUS_SUCCESS,
 180	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
 181	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
 182	MGMT_STATUS_FAILED,		/* Hardware Failure */
 183	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
 184	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
 185	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
 186	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
 187	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
 188	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
 189	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
 190	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
 191	MGMT_STATUS_BUSY,		/* Command Disallowed */
 192	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
 193	MGMT_STATUS_REJECTED,		/* Rejected Security */
 194	MGMT_STATUS_REJECTED,		/* Rejected Personal */
 195	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
 196	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
 197	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
 198	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
 199	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
 200	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
 201	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
 202	MGMT_STATUS_BUSY,		/* Repeated Attempts */
 203	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
 204	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
 205	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
 206	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
 207	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
 208	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
 209	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
 210	MGMT_STATUS_FAILED,		/* Unspecified Error */
 211	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
 212	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
 213	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
 214	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
 215	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
 216	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
 217	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
 218	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
 219	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
 220	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
 221	MGMT_STATUS_FAILED,		/* Transaction Collision */
 222	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
 223	MGMT_STATUS_REJECTED,		/* QoS Rejected */
 224	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
 225	MGMT_STATUS_REJECTED,		/* Insufficient Security */
 226	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
 227	MGMT_STATUS_BUSY,		/* Role Switch Pending */
 228	MGMT_STATUS_FAILED,		/* Slot Violation */
 229	MGMT_STATUS_FAILED,		/* Role Switch Failed */
 230	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
 231	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
 232	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
 233	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
 234	MGMT_STATUS_BUSY,		/* Controller Busy */
 235	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
 236	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
 237	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
 238	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
 239	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
 240};
 241
 242static u8 mgmt_status(u8 hci_status)
 243{
 244	if (hci_status < ARRAY_SIZE(mgmt_status_table))
 245		return mgmt_status_table[hci_status];
 246
 247	return MGMT_STATUS_FAILED;
 248}
 249
/* Broadcast a controller index related event on the control channel to
 * all sockets matching @flag; no socket is excluded.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
 256
/* Send an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the originator of the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
 263
/* Send an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
 270
 271static u8 le_addr_type(u8 mgmt_addr_type)
 272{
 273	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
 274		return ADDR_LE_DEV_PUBLIC;
 275	else
 276		return ADDR_LE_DEV_RANDOM;
 277}
 278
 279void mgmt_fill_version_info(void *ver)
 280{
 281	struct mgmt_rp_read_version *rp = ver;
 282
 283	rp->version = MGMT_VERSION;
 284	rp->revision = cpu_to_le16(MGMT_REVISION);
 285}
 286
 287static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
 288			u16 data_len)
 289{
 290	struct mgmt_rp_read_version rp;
 291
 292	BT_DBG("sock %p", sk);
 293
 294	mgmt_fill_version_info(&rp);
 295
 296	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
 297				 &rp, sizeof(rp));
 298}
 299
 300static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
 301			 u16 data_len)
 302{
 303	struct mgmt_rp_read_commands *rp;
 304	u16 num_commands, num_events;
 305	size_t rp_size;
 306	int i, err;
 307
 308	BT_DBG("sock %p", sk);
 309
 310	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
 311		num_commands = ARRAY_SIZE(mgmt_commands);
 312		num_events = ARRAY_SIZE(mgmt_events);
 313	} else {
 314		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
 315		num_events = ARRAY_SIZE(mgmt_untrusted_events);
 316	}
 317
 318	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
 319
 320	rp = kmalloc(rp_size, GFP_KERNEL);
 321	if (!rp)
 322		return -ENOMEM;
 323
 324	rp->num_commands = cpu_to_le16(num_commands);
 325	rp->num_events = cpu_to_le16(num_events);
 326
 327	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
 328		__le16 *opcode = rp->opcodes;
 329
 330		for (i = 0; i < num_commands; i++, opcode++)
 331			put_unaligned_le16(mgmt_commands[i], opcode);
 332
 333		for (i = 0; i < num_events; i++, opcode++)
 334			put_unaligned_le16(mgmt_events[i], opcode);
 335	} else {
 336		__le16 *opcode = rp->opcodes;
 337
 338		for (i = 0; i < num_commands; i++, opcode++)
 339			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
 340
 341		for (i = 0; i < num_events; i++, opcode++)
 342			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
 343	}
 344
 345	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
 346				rp, rp_size);
 347	kfree(rp);
 348
 349	return err;
 350}
 351
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * primary controllers. Two passes are made under hci_dev_list_lock: the
 * first sizes the reply, the second (applying stricter filters) fills
 * it, so the final count can only be smaller than the allocation.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the number of reported indexes. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Each index is a 16-bit value; GFP_ATOMIC because we allocate
	 * while holding the device list read lock.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, skipping controllers that
	 * are still in setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Shrink the reported length to what was actually collected. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
 411
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only primary controllers still flagged HCI_UNCONFIGURED.
 * Same two-pass pattern under hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply size. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, skipping controllers in
	 * setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Report only what was actually collected. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
 471
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report every controller (both
 * configured and unconfigured primaries plus AMPs) with a per-entry
 * type byte: 0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP. Uses the same count-then-fill pattern as read_index_list().
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the entry array. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
 545
 546static bool is_configured(struct hci_dev *hdev)
 547{
 548	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
 549	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
 550		return false;
 551
 552	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
 553	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
 554	    !bacmp(&hdev->public_addr, BDADDR_ANY))
 555		return false;
 556
 557	return true;
 558}
 559
 560static __le32 get_missing_options(struct hci_dev *hdev)
 561{
 562	u32 options = 0;
 563
 564	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
 565	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
 566		options |= MGMT_OPTION_EXTERNAL_CONFIG;
 567
 568	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
 569	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
 570	    !bacmp(&hdev->public_addr, BDADDR_ANY))
 571		options |= MGMT_OPTION_PUBLIC_ADDRESS;
 572
 573	return cpu_to_le32(options);
 574}
 575
/* Notify sockets registered for option events that the set of missing
 * configuration options of @hdev changed; @skip is excluded.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
 583
/* Complete @opcode with the current missing-options bitmask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
 591
 592static int read_config_info(struct sock *sk, struct hci_dev *hdev,
 593			    void *data, u16 data_len)
 594{
 595	struct mgmt_rp_read_config_info rp;
 596	u32 options = 0;
 597
 598	BT_DBG("sock %p %s", sk, hdev->name);
 599
 600	hci_dev_lock(hdev);
 601
 602	memset(&rp, 0, sizeof(rp));
 603	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
 604
 605	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
 606		options |= MGMT_OPTION_EXTERNAL_CONFIG;
 607
 608	if (hdev->set_bdaddr)
 609		options |= MGMT_OPTION_PUBLIC_ADDRESS;
 610
 611	rp.supported_options = cpu_to_le32(options);
 612	rp.missing_options = get_missing_options(hdev);
 613
 614	hci_dev_unlock(hdev);
 615
 616	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
 617				 &rp, sizeof(rp));
 618}
 619
/* Build the bitmask of PHYs this controller supports, derived from its
 * LMP feature bits (BR/EDR side) and LE feature bits (LE side). EDR
 * 2M/3M and multi-slot bits are nested under their prerequisite
 * capability checks.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for any BR/EDR radio. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M EDR implies 2M capability, hence the nesting. */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE radio. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
 671
/* Build the bitmask of currently selected PHYs. On the BR/EDR side the
 * EDR bits in hdev->pkt_type act as *exclusion* bits — an EDR packet
 * type is selected when its HCI_xDHy bit is clear — while the basic
 * rate multi-slot DM/DH bits are regular inclusion bits. On the LE side
 * selection follows the default TX/RX PHY preference masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are inverted: clear bit == enabled. */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
 734
 735static u32 get_configurable_phys(struct hci_dev *hdev)
 736{
 737	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
 738		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
 739}
 740
/* Build the bitmask of settings this controller can support at all,
 * based on its capabilities (as opposed to get_current_settings(),
 * which reports what is enabled right now).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of radio capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable relies on features added in 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		/* LE always supports Secure Connections (SMP based). */
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is possible with either the quirk or a driver
	 * hook for setting the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
 782
/* Build the bitmask of settings that are currently active on @hdev,
 * mapping each hdev flag to its mgmt setting bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
 850
/* Look up a pending mgmt command for @hdev on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
 855
/* Like pending_find(), but additionally match the command's user data
 * pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
 862
 863u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
 864{
 865	struct mgmt_pending_cmd *cmd;
 866
 867	/* If there's a pending mgmt command the flags will not yet have
 868	 * their final values, so check for this first.
 869	 */
 870	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
 871	if (cmd) {
 872		struct mgmt_mode *cp = cmd->param;
 873		if (cp->val == 0x01)
 874			return LE_AD_GENERAL;
 875		else if (cp->val == 0x02)
 876			return LE_AD_LIMITED;
 877	} else {
 878		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
 879			return LE_AD_LIMITED;
 880		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
 881			return LE_AD_GENERAL;
 882	}
 883
 884	return 0;
 885}
 886
 887bool mgmt_get_connectable(struct hci_dev *hdev)
 888{
 889	struct mgmt_pending_cmd *cmd;
 890
 891	/* If there's a pending mgmt command the flag will not yet have
 892	 * it's final value, so check for this first.
 893	 */
 894	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
 895	if (cmd) {
 896		struct mgmt_mode *cp = cmd->param;
 897
 898		return cp->val;
 899	}
 900
 901	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
 902}
 903
/* Delayed work: when the service cache period expires, push the current
 * EIR data and class of device to the controller in a single request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only one expiry runs: test-and-clear makes re-queues no-ops. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit outside the hdev lock; no completion callback needed. */
	hci_req_run(&req, NULL);
}
 924
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless we are currently advertising. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
 949
/* One-time transition of @hdev into mgmt-controlled mode, triggered by
 * the first mgmt command addressing it. Safe to call repeatedly: the
 * HCI_MGMT test-and-set guards the initialization.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
 965
/* MGMT_OP_READ_INFO handler: snapshot the controller's address, HCI
 * version, manufacturer, settings, class of device and names under the
 * hdev lock, then complete the command with the fixed-size reply.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
 995
/* Append the controller's EIR elements to @eir: class of device (when
 * BR/EDR is enabled), appearance (when LE is enabled) and both local
 * names. Returns the number of bytes written.
 *
 * NOTE(review): no bound on @eir is enforced here; callers in this file
 * pass 512-byte buffers, which is assumed to cover the worst case of
 * the bounded hdev name fields — confirm against the field sizes.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1019
1020static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1021				    void *data, u16 data_len)
1022{
1023	char buf[512];
1024	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1025	u16 eir_len;
1026
1027	BT_DBG("sock %p %s", sk, hdev->name);
1028
1029	memset(&buf, 0, sizeof(buf));
1030
1031	hci_dev_lock(hdev);
1032
1033	bacpy(&rp->bdaddr, &hdev->bdaddr);
1034
1035	rp->version = hdev->hci_ver;
1036	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1037
1038	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1039	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1040
1041
1042	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1043	rp->eir_len = cpu_to_le16(eir_len);
1044
1045	hci_dev_unlock(hdev);
1046
1047	/* If this command is called at least once, then the events
1048	 * for class of device and local name changes are disabled
1049	 * and only the new extended controller information event
1050	 * is used.
1051	 */
1052	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1053	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1054	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1055
1056	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1057				 sizeof(*rp) + eir_len);
1058}
1059
/* Broadcast an Extended Controller Information Changed event carrying
 * a freshly built EIR blob, skipping @skip. Only delivered to sockets
 * with HCI_MGMT_EXT_INFO_EVENTS set.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1075
/* Send a command complete for @opcode with the current settings
 * bitmask as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1083
/* Request-complete callback for clean_up_hci_state(): once no
 * connections remain, run the power off work immediately instead of
 * waiting for the delayed timer.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1093
1094void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1095{
1096	struct mgmt_ev_advertising_added ev;
1097
1098	ev.instance = instance;
1099
1100	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1101}
1102
1103void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1104			      u8 instance)
1105{
1106	struct mgmt_ev_advertising_removed ev;
1107
1108	ev.instance = instance;
1109
1110	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1111}
1112
1113static void cancel_adv_timeout(struct hci_dev *hdev)
1114{
1115	if (hdev->adv_instance_timeout) {
1116		hdev->adv_instance_timeout = 0;
1117		cancel_delayed_work(&hdev->adv_instance_expire);
1118	}
1119}
1120
/* Queue the HCI commands needed before powering off: disable page and
 * inquiry scan, remove advertising instances, stop advertising, stop
 * discovery and abort every connection. Returns the hci_req_run()
 * result; -ENODATA indicates no commands needed to be queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1154
/* Handler for MGMT_OP_SET_POWERED: power the controller up or down.
 * The actual state change runs on req_workqueue; a pending command is
 * registered here so the settings response can be sent on completion.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer right away */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1209
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets with HCI_MGMT_SETTING_EVENTS set, except @skip (the
 * socket whose command caused the change, which gets the command
 * response instead).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1217
/* Broadcast New Settings to all mgmt sockets (no socket skipped). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1222
/* Context passed to mgmt_pending_foreach() callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen; settings_rsp() holds a ref */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1228
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free it. The first socket encountered is
 * remembered in the cmd_lookup (with a reference held) so the caller
 * can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1244
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1252
1253static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1254{
1255	if (cmd->cmd_complete) {
1256		u8 *status = data;
1257
1258		cmd->cmd_complete(cmd, *status);
1259		mgmt_pending_remove(cmd);
1260
1261		return;
1262	}
1263
1264	cmd_status_rsp(cmd, data);
1265}
1266
/* Complete a pending command, echoing back its original parameters. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1272
/* Complete a pending command whose parameters begin with a
 * mgmt_addr_info, echoing back only the address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1278
1279static u8 mgmt_bredr_support(struct hci_dev *hdev)
1280{
1281	if (!lmp_bredr_capable(hdev))
1282		return MGMT_STATUS_NOT_SUPPORTED;
1283	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1284		return MGMT_STATUS_REJECTED;
1285	else
1286		return MGMT_STATUS_SUCCESS;
1287}
1288
1289static u8 mgmt_le_support(struct hci_dev *hdev)
1290{
1291	if (!lmp_le_capable(hdev))
1292		return MGMT_STATUS_NOT_SUPPORTED;
1293	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1294		return MGMT_STATUS_REJECTED;
1295	else
1296		return MGMT_STATUS_SUCCESS;
1297}
1298
/* HCI-side completion for Set Discoverable: on success arm the
 * discoverable timeout (if configured) and send the settings response
 * plus a New Settings broadcast; on failure report the error status
 * and clear the limited discoverable flag.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the discoverable timeout stored by set_discoverable() */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1333
/* Handler for MGMT_OP_SET_DISCOVERABLE (val: 0x00 = off, 0x01 =
 * general, 0x02 = limited). cp->timeout is in seconds; disabling
 * requires timeout == 0 and limited mode requires a non-zero timeout.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be armed while the controller is powered */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* While powered off only the flag is toggled; no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1457
/* HCI-side completion for Set Connectable: on success send the
 * settings response and broadcast New Settings; on failure report the
 * error status. Either way the pending command is removed.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1485
/* Flag-only connectable change, used by set_connectable() when the
 * controller is powered off. Disabling connectable also clears the
 * discoverable flag. On an actual change, scan state is refreshed and
 * a New Settings event is broadcast.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1514
/* Handler for MGMT_OP_SET_CONNECTABLE (val: 0x00/0x01). When powered,
 * the flags are updated and the actual HCI work runs via
 * connectable_update on the request workqueue; when powered off only
 * the flags are changed.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1571
/* Handler for MGMT_OP_SET_BONDABLE (val: 0x00/0x01). Purely a flag
 * change; on an actual change, a New Settings event is broadcast and,
 * in limited privacy mode with active advertising, a discoverable
 * update is scheduled since the advertising address may change.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1614
/* Handler for MGMT_OP_SET_LINK_SECURITY (val: 0x00/0x01). While
 * powered, sends HCI Write Authentication Enable and defers the reply
 * to the pending command; while powered off only the flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1683
/* Handler for MGMT_OP_SET_SSP (val: 0x00/0x01). While powered, sends
 * HCI Write Simple Pairing Mode (dropping SSP debug mode first if it
 * was on); while powered off only the flags are toggled, where
 * disabling SSP also clears High Speed support.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS depends on SSP; report "changed" if either
			 * flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns SSP debug mode off (best effort) */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1764
/* Handler for MGMT_OP_SET_HS (High Speed, val: 0x00/0x01). Requires
 * SSP to be enabled. Purely a flag change; disabling while powered is
 * rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP may change the SSP state this depends on */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1821
/* Request-complete callback for set_le(): answer all pending SET_LE
 * commands (error status on failure, settings response plus New
 * Settings broadcast on success) and, when LE ended up enabled,
 * refresh advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1868
/* Handler for MGMT_OP_SET_LE (val: 0x00/0x01). While powered and with
 * a real state change pending, sends HCI Write LE Host Supported
 * (disabling advertising first when turning LE off); otherwise only
 * the flags are toggled.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off also removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off or already in the requested host state: only the
	 * flags need updating, no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE on the host */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1975
1976/* This is a helper function to test for pending mgmt commands that can
1977 * cause CoD or EIR HCI commands. We can only allow one such pending
1978 * mgmt command at a time since otherwise we cannot easily track what
1979 * the current values are, will be, and based on that calculate if a new
1980 * HCI command needs to be sent and if yes with what value.
1981 */
1982static bool pending_eir_or_class(struct hci_dev *hdev)
1983{
1984	struct mgmt_pending_cmd *cmd;
1985
1986	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1987		switch (cmd->opcode) {
1988		case MGMT_OP_ADD_UUID:
1989		case MGMT_OP_REMOVE_UUID:
1990		case MGMT_OP_SET_DEV_CLASS:
1991		case MGMT_OP_SET_POWERED:
1992			return true;
1993		}
1994	}
1995
1996	return false;
1997}
1998
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; 16/32-bit SIG UUIDs share its first 12
 * bytes (see get_uuid_size below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2003
2004static u8 get_uuid_size(const u8 *uuid)
2005{
2006	u32 val;
2007
2008	if (memcmp(uuid, bluetooth_base_uuid, 12))
2009		return 128;
2010
2011	val = get_unaligned_le32(&uuid[12]);
2012	if (val > 0xffff)
2013		return 32;
2014
2015	return 16;
2016}
2017
/* Complete a pending CoD/EIR-related command (@mgmt_op), returning
 * the current class of device as response payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2036
/* Request-complete callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2043
/* Handler for MGMT_OP_ADD_UUID: record the UUID on hdev->uuids and
 * refresh the class of device and EIR. Replies immediately when no
 * HCI commands were needed (-ENODATA), otherwise via
 * add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed; reply right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2101
2102static bool enable_service_cache(struct hci_dev *hdev)
2103{
2104	if (!hdev_is_powered(hdev))
2105		return false;
2106
2107	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2108		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2109				   CACHE_TIMEOUT);
2110		return true;
2111	}
2112
2113	return false;
2114}
2115
/* Request-complete callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2122
/* Handler for MGMT_OP_REMOVE_UUID: remove all matching UUIDs (an
 * all-zero UUID clears the whole list) and refresh the class of
 * device and EIR. Replies immediately when no HCI commands were
 * needed (-ENODATA), otherwise via remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID clears the entire list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; reply right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2201
/* hci_req_run() completion callback for Set Device Class: forwards the
 * HCI status to the generic class/EIR command completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2208
/* Set the major/minor BR/EDR Class of Device (the service class bits
 * are derived from the registered UUIDs) and, when the controller is
 * powered, push the new class and EIR data to it.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* NOTE(review): the lock is dropped around the
		 * synchronous cancel, presumably because the
		 * service_cache work itself takes hci_dev_lock —
		 * keep this ordering intact.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands were queued, reply directly */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred to set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2279
/* Replace the kernel's stored BR/EDR link keys with the set supplied by
 * userspace (typically done at daemon startup, before powering on).
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every key up front so the operation is all-or-nothing:
	 * nothing is cleared or stored if any entry is invalid.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Emit New Settings only if the debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2360
2361static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2362			   u8 addr_type, struct sock *skip_sk)
2363{
2364	struct mgmt_ev_device_unpaired ev;
2365
2366	bacpy(&ev.addr.bdaddr, bdaddr);
2367	ev.addr.type = addr_type;
2368
2369	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2370			  skip_sk);
2371}
2372
/* Remove all pairing material (link keys for BR/EDR; SMP LTK/IRK and
 * connection parameters for LE) for a device and optionally terminate
 * an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: parameters can be dropped right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Reply is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2500
/* Disconnect a remote BR/EDR or LE device.  The mgmt reply is deferred
 * until the disconnection actually completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED means no established link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2566
2567static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2568{
2569	switch (link_type) {
2570	case LE_LINK:
2571		switch (addr_type) {
2572		case ADDR_LE_DEV_PUBLIC:
2573			return BDADDR_LE_PUBLIC;
2574
2575		default:
2576			/* Fallback to LE Random address type */
2577			return BDADDR_LE_RANDOM;
2578		}
2579
2580	default:
2581		/* Fallback to BR/EDR type */
2582		return BDADDR_BREDR;
2583	}
2584}
2585
/* Return the list of currently connected devices (ACL and LE only;
 * SCO/eSCO links are filtered out of the reply).
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses.  SCO/eSCO entries are written
	 * but not counted, so the next match simply overwrites them.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2639
/* Send HCI PIN Code Negative Reply for @cp->addr and track the mgmt
 * command as pending; the mgmt reply comes from the HCI completion.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2660
/* Answer a PIN Code Request from a remote BR/EDR device with the PIN
 * supplied by userspace.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * rejected towards the remote via a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2722
/* Set the IO capability used for future pairing attempts.  Takes
 * effect immediately; no HCI traffic is needed.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2746
2747static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2748{
2749	struct hci_dev *hdev = conn->hdev;
2750	struct mgmt_pending_cmd *cmd;
2751
2752	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2753		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2754			continue;
2755
2756		if (cmd->user_data != conn)
2757			continue;
2758
2759		return cmd;
2760	}
2761
2762	return NULL;
2763}
2764
/* Finish a Pair Device command: send the mgmt reply, detach the
 * pairing callbacks from the connection and drop the references taken
 * when the pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done in pair_device() */
	hci_conn_put(conn);

	return err;
}
2793
2794void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2795{
2796	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2797	struct mgmt_pending_cmd *cmd;
2798
2799	cmd = find_pairing(conn);
2800	if (cmd) {
2801		cmd->cmd_complete(cmd, status);
2802		mgmt_pending_remove(cmd);
2803	}
2804}
2805
2806static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2807{
2808	struct mgmt_pending_cmd *cmd;
2809
2810	BT_DBG("status %u", status);
2811
2812	cmd = find_pairing(conn);
2813	if (!cmd) {
2814		BT_DBG("Unable to find a pending command");
2815		return;
2816	}
2817
2818	cmd->cmd_complete(cmd, mgmt_status(status));
2819	mgmt_pending_remove(cmd);
2820}
2821
/* LE connection callback: unlike BR/EDR, a successful event here is
 * not proof that pairing finished (SMP signals that separately via
 * mgmt_smp_complete), so only failures complete the pending command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2840
/* Initiate pairing with a remote BR/EDR or LE device: establish (or
 * reuse) a connection, install pairing callbacks and raise security.
 * The mgmt reply is deferred until pairing completes or fails.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
					   HCI_LE_CONN_TIMEOUT);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means another pairing is active */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released again in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2971
/* Abort an ongoing Pair Device operation; the original PAIR_DEVICE
 * command is completed with MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the device being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3014
/* Common helper for all user pairing responses (PIN negative reply,
 * user confirm, passkey).  For LE the answer is handed to SMP and the
 * command completes immediately; for BR/EDR the matching HCI command
 * is sent and completion is deferred.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: reply goes through SMP rather than an HCI command */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3085
/* Reject a PIN Code Request from the remote device */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3097
/* Accept a User Confirmation Request during pairing */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3113
/* Reject a User Confirmation Request during pairing */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3125
/* Supply the passkey for a User Passkey Request during pairing */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3137
/* Reject a User Passkey Request during pairing */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3149
/* Expire the current advertising instance if it carries any of the
 * given flags (e.g. after the local name or appearance changed, so its
 * advertised data is now stale) and schedule the next instance.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3178
/* hci_req_run() completion callback for Set Local Name: reply to the
 * pending mgmt command and, on success, refresh advertising instances
 * that include the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Re-advertise instances that embed the local name */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3210
/* Set the local name and (optional) short name.  When the controller
 * is powered the name is also written to it and reflected in EIR and
 * scan response data.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Not powered: just store the name and notify listeners */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3280
3281static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3282			  u16 len)
3283{
3284	struct mgmt_cp_set_appearance *cp = data;
3285	u16 apperance;
3286	int err;
3287
3288	BT_DBG("");
3289
3290	if (!lmp_le_capable(hdev))
3291		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3292				       MGMT_STATUS_NOT_SUPPORTED);
3293
3294	apperance = le16_to_cpu(cp->appearance);
3295
3296	hci_dev_lock(hdev);
3297
3298	if (hdev->appearance != apperance) {
3299		hdev->appearance = apperance;
3300
3301		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3302			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3303
3304		ext_info_changed(hdev, sk);
3305	}
3306
3307	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3308				0);
3309
3310	hci_dev_unlock(hdev);
3311
3312	return err;
3313}
3314
/* Report the supported, configurable and currently selected PHYs */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3335
3336int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3337{
3338	struct mgmt_ev_phy_configuration_changed ev;
3339
3340	memset(&ev, 0, sizeof(ev));
3341
3342	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3343
3344	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3345			  sizeof(ev), skip);
3346}
3347
3348static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3349				     u16 opcode, struct sk_buff *skb)
3350{
3351	struct mgmt_pending_cmd *cmd;
3352
3353	BT_DBG("status 0x%02x", status);
3354
3355	hci_dev_lock(hdev);
3356
3357	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3358	if (!cmd)
3359		goto unlock;
3360
3361	if (status) {
3362		mgmt_cmd_status(cmd->sk, hdev->id,
3363				MGMT_OP_SET_PHY_CONFIGURATION,
3364				mgmt_status(status));
3365	} else {
3366		mgmt_cmd_complete(cmd->sk, hdev->id,
3367				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
3368				  NULL, 0);
3369
3370		mgmt_phy_configuration_changed(hdev, cmd->sk);
3371	}
3372
3373	mgmt_pending_remove(cmd);
3374
3375unlock:
3376	hci_dev_unlock(hdev);
3377}
3378
/* Set PHY Configuration command handler.
 *
 * Validates the requested PHY selection against the supported and
 * configurable PHY masks, applies the BR/EDR part immediately by
 * adjusting the ACL packet type mask, and issues an HCI LE Set
 * Default PHY request for the LE part when it changed (completion is
 * reported from set_default_phy_complete()).
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	BT_DBG("sock %p %s", sk, hdev->name);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: succeed without touching the controller. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Build the BR/EDR ACL packet type mask.  Basic rate (BR 1M)
	 * multi-slot packets are enabled by setting their DM/DH bits,
	 * whereas the EDR 2M/3M bits have inverted meaning: setting
	 * HCI_2DHx/HCI_3DHx *disables* that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is no HCI command to
	 * send; report the change (if any) and complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for the
	 * TX (0x01) and/or RX (0x02) direction.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3533
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Completion handler for the Read Local OOB Data HCI request.
 *
 * Parses either the legacy (P-192 only) or the extended (P-192 and
 * P-256) HCI response and forwards the hash/randomizer values to the
 * waiting mgmt command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy response carrying P-192 values only. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the mgmt reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response with both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3592
3593static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3594			       void *data, u16 data_len)
3595{
3596	struct mgmt_pending_cmd *cmd;
3597	struct hci_request req;
3598	int err;
3599
3600	BT_DBG("%s", hdev->name);
3601
3602	hci_dev_lock(hdev);
3603
3604	if (!hdev_is_powered(hdev)) {
3605		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3606				      MGMT_STATUS_NOT_POWERED);
3607		goto unlock;
3608	}
3609
3610	if (!lmp_ssp_capable(hdev)) {
3611		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3612				      MGMT_STATUS_NOT_SUPPORTED);
3613		goto unlock;
3614	}
3615
3616	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3617		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3618				      MGMT_STATUS_BUSY);
3619		goto unlock;
3620	}
3621
3622	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3623	if (!cmd) {
3624		err = -ENOMEM;
3625		goto unlock;
3626	}
3627
3628	hci_req_init(&req, hdev);
3629
3630	if (bredr_sc_enabled(hdev))
3631		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3632	else
3633		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3634
3635	err = hci_req_run_skb(&req, read_local_oob_data_complete);
3636	if (err < 0)
3637		mgmt_pending_remove(cmd);
3638
3639unlock:
3640	hci_dev_unlock(hdev);
3641	return err;
3642}
3643
/* Add Remote OOB Data command handler.
 *
 * Dispatches on the command length: the short form carries only the
 * P-192 hash/randomizer (BR/EDR addresses only), the extended form
 * carries both P-192 and P-256 values.  All-zero key material disables
 * OOB data for the corresponding curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both curves may be present. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3751
3752static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3753				  void *data, u16 len)
3754{
3755	struct mgmt_cp_remove_remote_oob_data *cp = data;
3756	u8 status;
3757	int err;
3758
3759	BT_DBG("%s", hdev->name);
3760
3761	if (cp->addr.type != BDADDR_BREDR)
3762		return mgmt_cmd_complete(sk, hdev->id,
3763					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3764					 MGMT_STATUS_INVALID_PARAMS,
3765					 &cp->addr, sizeof(cp->addr));
3766
3767	hci_dev_lock(hdev);
3768
3769	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3770		hci_remote_oob_data_clear(hdev);
3771		status = MGMT_STATUS_SUCCESS;
3772		goto done;
3773	}
3774
3775	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3776	if (err < 0)
3777		status = MGMT_STATUS_INVALID_PARAMS;
3778	else
3779		status = MGMT_STATUS_SUCCESS;
3780
3781done:
3782	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3783				status, &cp->addr, sizeof(cp->addr));
3784
3785	hci_dev_unlock(hdev);
3786	return err;
3787}
3788
3789void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3790{
3791	struct mgmt_pending_cmd *cmd;
3792
3793	BT_DBG("status %d", status);
3794
3795	hci_dev_lock(hdev);
3796
3797	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3798	if (!cmd)
3799		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3800
3801	if (!cmd)
3802		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3803
3804	if (cmd) {
3805		cmd->cmd_complete(cmd, mgmt_status(status));
3806		mgmt_pending_remove(cmd);
3807	}
3808
3809	hci_dev_unlock(hdev);
 
 
 
 
 
 
 
3810}
3811
3812static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3813				    uint8_t *mgmt_status)
3814{
3815	switch (type) {
3816	case DISCOV_TYPE_LE:
3817		*mgmt_status = mgmt_le_support(hdev);
3818		if (*mgmt_status)
3819			return false;
3820		break;
3821	case DISCOV_TYPE_INTERLEAVED:
3822		*mgmt_status = mgmt_le_support(hdev);
3823		if (*mgmt_status)
3824			return false;
3825		/* Intentional fall-through */
3826	case DISCOV_TYPE_BREDR:
3827		*mgmt_status = mgmt_bredr_support(hdev);
3828		if (*mgmt_status)
3829			return false;
3830		break;
3831	default:
3832		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3833		return false;
3834	}
3835
3836	return true;
3837}
3838
3839static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3840				    u16 op, void *data, u16 len)
3841{
3842	struct mgmt_cp_start_discovery *cp = data;
3843	struct mgmt_pending_cmd *cmd;
3844	u8 status;
3845	int err;
3846
3847	BT_DBG("%s", hdev->name);
3848
3849	hci_dev_lock(hdev);
3850
3851	if (!hdev_is_powered(hdev)) {
3852		err = mgmt_cmd_complete(sk, hdev->id, op,
3853					MGMT_STATUS_NOT_POWERED,
3854					&cp->type, sizeof(cp->type));
3855		goto failed;
3856	}
3857
3858	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3859	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3860		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3861					&cp->type, sizeof(cp->type));
3862		goto failed;
3863	}
3864
3865	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3866		err = mgmt_cmd_complete(sk, hdev->id, op, status,
3867					&cp->type, sizeof(cp->type));
3868		goto failed;
3869	}
3870
 
 
 
 
 
 
 
3871	/* Clear the discovery filter first to free any previously
3872	 * allocated memory for the UUID list.
3873	 */
3874	hci_discovery_filter_clear(hdev);
3875
3876	hdev->discovery.type = cp->type;
3877	hdev->discovery.report_invalid_rssi = false;
3878	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3879		hdev->discovery.limited = true;
3880	else
3881		hdev->discovery.limited = false;
3882
3883	cmd = mgmt_pending_add(sk, op, hdev, data, len);
3884	if (!cmd) {
3885		err = -ENOMEM;
3886		goto failed;
3887	}
3888
3889	cmd->cmd_complete = generic_cmd_complete;
3890
3891	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3892	queue_work(hdev->req_workqueue, &hdev->discov_update);
3893	err = 0;
3894
3895failed:
3896	hci_dev_unlock(hdev);
3897	return err;
3898}
3899
/* Start Discovery command handler: plain discovery without limits
 * or result filtering.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3906
/* Start Limited Discovery command handler: same as Start Discovery but
 * marks the discovery as limited (hdev->discovery.limited gets set).
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3914
/* Complete a service discovery command, returning only the first byte
 * of the stored command parameters (the discovery type) as response
 * data.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3921
/* Start Service Discovery command handler.
 *
 * Like Start Discovery, but with result filtering: results are matched
 * against an RSSI threshold and an optional list of service UUIDs
 * carried in the variable-length tail of the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on UUIDs keeps the expected_len u16 below U16_MAX. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse while a discovery or periodic inquiry is in progress. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4022
4023void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4024{
4025	struct mgmt_pending_cmd *cmd;
4026
4027	BT_DBG("status %d", status);
4028
4029	hci_dev_lock(hdev);
4030
4031	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4032	if (cmd) {
4033		cmd->cmd_complete(cmd, mgmt_status(status));
4034		mgmt_pending_remove(cmd);
4035	}
4036
4037	hci_dev_unlock(hdev);
 
 
 
 
 
 
4038}
4039
4040static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4041			  u16 len)
4042{
4043	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4044	struct mgmt_pending_cmd *cmd;
4045	int err;
4046
4047	BT_DBG("%s", hdev->name);
4048
4049	hci_dev_lock(hdev);
4050
4051	if (!hci_discovery_active(hdev)) {
4052		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4053					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4054					sizeof(mgmt_cp->type));
4055		goto unlock;
4056	}
4057
4058	if (hdev->discovery.type != mgmt_cp->type) {
4059		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4060					MGMT_STATUS_INVALID_PARAMS,
4061					&mgmt_cp->type, sizeof(mgmt_cp->type));
4062		goto unlock;
4063	}
4064
4065	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4066	if (!cmd) {
4067		err = -ENOMEM;
4068		goto unlock;
4069	}
4070
4071	cmd->cmd_complete = generic_cmd_complete;
4072
4073	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4074	queue_work(hdev->req_workqueue, &hdev->discov_update);
4075	err = 0;
4076
4077unlock:
4078	hci_dev_unlock(hdev);
4079	return err;
4080}
4081
4082static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4083			u16 len)
4084{
4085	struct mgmt_cp_confirm_name *cp = data;
4086	struct inquiry_entry *e;
4087	int err;
4088
4089	BT_DBG("%s", hdev->name);
4090
4091	hci_dev_lock(hdev);
4092
4093	if (!hci_discovery_active(hdev)) {
4094		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4095					MGMT_STATUS_FAILED, &cp->addr,
4096					sizeof(cp->addr));
4097		goto failed;
4098	}
4099
4100	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4101	if (!e) {
4102		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4103					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4104					sizeof(cp->addr));
4105		goto failed;
4106	}
4107
4108	if (cp->name_known) {
4109		e->name_state = NAME_KNOWN;
4110		list_del(&e->list);
4111	} else {
4112		e->name_state = NAME_NEEDED;
4113		hci_inquiry_cache_update_resolve(hdev, e);
4114	}
4115
4116	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4117				&cp->addr, sizeof(cp->addr));
4118
4119failed:
4120	hci_dev_unlock(hdev);
4121	return err;
4122}
4123
4124static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4125			u16 len)
4126{
4127	struct mgmt_cp_block_device *cp = data;
4128	u8 status;
4129	int err;
4130
4131	BT_DBG("%s", hdev->name);
4132
4133	if (!bdaddr_type_is_valid(cp->addr.type))
4134		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4135					 MGMT_STATUS_INVALID_PARAMS,
4136					 &cp->addr, sizeof(cp->addr));
4137
4138	hci_dev_lock(hdev);
4139
4140	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4141				  cp->addr.type);
4142	if (err < 0) {
4143		status = MGMT_STATUS_FAILED;
4144		goto done;
4145	}
4146
4147	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4148		   sk);
4149	status = MGMT_STATUS_SUCCESS;
4150
4151done:
4152	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4153				&cp->addr, sizeof(cp->addr));
4154
4155	hci_dev_unlock(hdev);
4156
4157	return err;
4158}
4159
4160static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4161			  u16 len)
4162{
4163	struct mgmt_cp_unblock_device *cp = data;
4164	u8 status;
4165	int err;
4166
4167	BT_DBG("%s", hdev->name);
4168
4169	if (!bdaddr_type_is_valid(cp->addr.type))
4170		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4171					 MGMT_STATUS_INVALID_PARAMS,
4172					 &cp->addr, sizeof(cp->addr));
4173
4174	hci_dev_lock(hdev);
4175
4176	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4177				  cp->addr.type);
4178	if (err < 0) {
4179		status = MGMT_STATUS_INVALID_PARAMS;
4180		goto done;
4181	}
4182
4183	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4184		   sk);
4185	status = MGMT_STATUS_SUCCESS;
4186
4187done:
4188	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4189				&cp->addr, sizeof(cp->addr));
4190
4191	hci_dev_unlock(hdev);
4192
4193	return err;
4194}
4195
4196static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4197			 u16 len)
4198{
4199	struct mgmt_cp_set_device_id *cp = data;
4200	struct hci_request req;
4201	int err;
4202	__u16 source;
4203
4204	BT_DBG("%s", hdev->name);
4205
4206	source = __le16_to_cpu(cp->source);
4207
4208	if (source > 0x0002)
4209		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4210				       MGMT_STATUS_INVALID_PARAMS);
4211
4212	hci_dev_lock(hdev);
4213
4214	hdev->devid_source = source;
4215	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4216	hdev->devid_product = __le16_to_cpu(cp->product);
4217	hdev->devid_version = __le16_to_cpu(cp->version);
4218
4219	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4220				NULL, 0);
4221
4222	hci_req_init(&req, hdev);
4223	__hci_req_update_eir(&req);
4224	hci_req_run(&req, NULL);
4225
4226	hci_dev_unlock(hdev);
4227
4228	return err;
4229}
4230
/* Request-completion callback used when re-enabling advertising
 * instances; only logs the status, no further action is needed.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4236
/* Completion handler for the Set Advertising HCI request.
 *
 * Syncs the HCI_ADVERTISING flag with the controller state, answers
 * all pending Set Advertising commands and, when advertising was just
 * turned off while instances are configured, re-schedules instance
 * advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's advertising state in the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured
		 * one, if any.
		 */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4299
/* Set Advertising command handler.
 *
 * val 0x00 disables advertising, 0x01 enables it and 0x02 enables it
 * with the connectable flag set.  In states where no HCI traffic is
 * allowed or needed, only the flags are toggled and a settings
 * response is sent directly.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while an advertising or LE toggle is still in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	/* Flags get finalized in set_advertising_complete(). */
	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4407
/* MGMT Set Static Address command handler.
 *
 * Configure the random static address used by a LE capable
 * controller.  The address can only be changed while the controller
 * is powered off, and BDADDR_ANY (all zeroes) disables the static
 * address again.  On success the updated settings are sent to all
 * management clients.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Reject changes while powered so the controller's identity
	 * address cannot change mid-operation.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE is not an acceptable static address */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4451
/* MGMT Set Scan Parameters command handler.
 *
 * Update the LE scan interval and window used for background
 * scanning.  Both values must be within 0x0004-0x4000 and the window
 * must not exceed the interval.  If a passive background scan is
 * currently active (LE scanning enabled while discovery is stopped),
 * it is restarted so the new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window is the portion of the interval actually
	 * spent scanning, so it can never be larger than the interval.
	 */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4508
/* Completion callback for the HCI request queued by
 * set_fast_connectable().  On success the HCI_FAST_CONNECTABLE flag
 * is synced with the requested mode and the updated settings are
 * sent out; on failure only a command status is returned.  The
 * pending management command is removed in either case.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may have been canceled already */
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4542
/* MGMT Set Fast Connectable command handler.
 *
 * Enable or disable fast connectable mode.  Requires a BR/EDR
 * enabled controller with Bluetooth 1.2 or later.  When powered
 * off, only the HCI_FAST_CONNECTABLE flag is toggled; when powered,
 * an HCI request is issued and the result is handled in
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable operation may be in flight */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: reply without touching HCI */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off just flip the flag; the controller will be
	 * configured accordingly on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4607
/* Completion callback for the HCI request queued by set_bredr().
 * set_bredr() sets HCI_BREDR_ENABLED before running the request, so
 * on failure the flag has to be cleared again to restore the old
 * state; on success the settings response and new settings event are
 * emitted.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may have been canceled already */
	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4639
/* MGMT Set BR/EDR command handler.
 *
 * Enable or disable BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller.  While powered off the flags are simply toggled (and,
 * when disabling, all BR/EDR specific settings are cleared).  While
 * powered on, disabling is rejected outright and re-enabling is only
 * permitted when neither a LE static address nor secure connections
 * would make the resulting configuration invalid (see the detailed
 * comment below).  The actual HCI work is finished asynchronously in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state already active: reply without any changes */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also invalidates every BR/EDR
		 * specific setting, so clear those flags as well.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4751
/* Completion callback for the Write SC Support command issued by
 * set_secure_conn().  On success the requested value is mapped onto
 * the flag pair:
 *   0x00 - SC disabled, 0x01 - SC enabled, 0x02 - SC only mode,
 * and the new settings are broadcast; on failure only a status is
 * returned and the flags remain untouched.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* The command may have been canceled already */
	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4796
/* MGMT Set Secure Connections command handler.
 *
 * val: 0x00 disables secure connections, 0x01 enables it and 0x02
 * enables "SC only" mode.  When the controller is powered off, not
 * SC capable, or BR/EDR is disabled, only the host-side flags are
 * updated; otherwise the controller support is toggled via HCI and
 * the flags are updated in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a SC capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC capable, BR/EDR enabled controller the
	 * change is host-side only: just adjust the flags.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both the enabled state and the SC only mode already match:
	 * nothing to send to the controller.
	 */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4884
/* MGMT Set Debug Keys command handler.
 *
 * val: 0x00 discards debug keys, 0x01 keeps stored debug keys
 * (HCI_KEEP_DEBUG_KEYS) and 0x02 additionally enables the use of
 * debug keys for new pairings (HCI_USE_DEBUG_KEYS).  If the "use"
 * state changes on a powered controller with SSP enabled, the SSP
 * debug mode on the controller is updated as well.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Keep the controller's SSP debug mode in sync with the
	 * HCI_USE_DEBUG_KEYS flag when it actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4931
/* MGMT Set Privacy command handler.
 *
 * privacy: 0x00 disables LE privacy, 0x01 enables it and 0x02
 * enables limited privacy.  Enabling stores the supplied IRK and
 * marks the current RPA as expired so a fresh one will be generated.
 * Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4988
4989static bool irk_is_valid(struct mgmt_irk_info *irk)
4990{
4991	switch (irk->addr.type) {
4992	case BDADDR_LE_PUBLIC:
4993		return true;
4994
4995	case BDADDR_LE_RANDOM:
4996		/* Two most significant bits shall be set */
4997		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4998			return false;
4999		return true;
5000	}
5001
5002	return false;
5003}
5004
/* MGMT Load IRKs command handler.
 *
 * Replaces the complete list of stored Identity Resolving Keys with
 * the ones provided by userspace.  Every entry is validated before
 * the existing list is cleared, so an invalid request leaves the
 * current keys intact.  Use of this command implies that userspace
 * can resolve RPAs, so HCI_RPA_RESOLVING is set on success.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload size */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries up front so the stored list is only
	 * cleared when the whole request is acceptable.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5067
5068static bool ltk_is_valid(struct mgmt_ltk_info *key)
5069{
5070	if (key->master != 0x00 && key->master != 0x01)
5071		return false;
5072
5073	switch (key->addr.type) {
5074	case BDADDR_LE_PUBLIC:
5075		return true;
5076
5077	case BDADDR_LE_RANDOM:
5078		/* Two most significant bits shall be set */
5079		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5080			return false;
5081		return true;
5082	}
5083
5084	return false;
5085}
5086
/* MGMT Load Long Term Keys command handler.
 *
 * Replaces the complete list of stored SMP long term keys with the
 * ones provided by userspace.  All entries are validated before the
 * existing keys are cleared.  Note that P-256 debug keys (and any
 * unknown key type) are deliberately skipped and never stored.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload size */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries up front so the stored list is only
	 * cleared when the whole request is acceptable.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Map the MGMT key type to the SMP key type and
		 * authentication requirement.  The P256_DEBUG case
		 * falls through into default, so debug keys (like any
		 * unknown type) are skipped rather than stored.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5174
/* Send the Get Connection Info response for a finished request.  On
 * success the values cached in the hci_conn are reported; otherwise
 * RSSI and TX power are reported as invalid.  Drops the connection
 * references taken when the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The reply echoes the address from the original request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Matches the hci_conn_hold()/hci_conn_get() in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5201
/* Completion callback for the Read RSSI / Read TX Power request
 * issued by get_conn_info().  Recovers the connection handle from
 * the last sent HCI command and completes the matching pending
 * management command via conn_info_cmd_complete().
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was found in the sent command buffer */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5255
/* MGMT Get Connection Info command handler.
 *
 * Reports RSSI, TX power and max TX power for an existing connection
 * (BR/EDR or LE).  Values cached in the hci_conn are returned
 * directly when recent enough; otherwise an HCI request refreshes
 * them and the reply is deferred to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped again in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5376
/* Build and send the Get Clock Info response.  The local clock is
 * reported whenever the device can still be resolved; the piconet
 * clock and accuracy are only filled in when a connection was
 * associated with the request.  Drops the connection references
 * taken by get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* The reply echoes the address from the original request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Matches the hci_conn_hold()/hci_conn_get() in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5412
/* Completion callback for the Read Clock request issued by
 * get_clock_info().  When a piconet clock was requested (which !=
 * 0), the connection is looked up from the handle in the sent
 * command; the matching pending command is then completed.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5444
/* MGMT Get Clock Info command handler.
 *
 * Reads the local clock and, when a non-zero BR/EDR peer address is
 * supplied, also the piconet clock of that connection.  The values
 * are delivered asynchronously through get_clock_info_complete() and
 * clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects the piconet clock of that peer */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00 in the zeroed struct reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References dropped again in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5520
5521static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5522{
5523	struct hci_conn *conn;
5524
5525	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5526	if (!conn)
5527		return false;
5528
5529	if (conn->dst_type != type)
5530		return false;
5531
5532	if (conn->state != BT_CONNECTED)
5533		return false;
5534
5535	return true;
5536}
5537
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up or create the connection parameters for this address */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it currently sits on
	 * before re-queueing it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over passive
		 * device-found reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if none is established to
		 * this address yet.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5582
5583static void device_added(struct sock *sk, struct hci_dev *hdev,
5584			 bdaddr_t *bdaddr, u8 type, u8 action)
5585{
5586	struct mgmt_ev_device_added ev;
5587
5588	bacpy(&ev.addr.bdaddr, bdaddr);
5589	ev.addr.type = type;
5590	ev.action = action;
5591
5592	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5593}
5594
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address must be of a known type and must not be the
	 * wildcard BDADDR_ANY address.
	 */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined (mapped to auto_conn below) */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* BR/EDR devices go on the whitelist; LE devices use
		 * conn_params instead (below).
		 */
		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the kernel auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	/* Notify other mgmt sockets and confirm success to the caller */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5682
5683static void device_removed(struct sock *sk, struct hci_dev *hdev,
5684			   bdaddr_t *bdaddr, u8 type)
5685{
5686	struct mgmt_ev_device_removed ev;
5687
5688	bacpy(&ev.addr.bdaddr, bdaddr);
5689	ev.addr.type = type;
5690
5691	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5692}
5693
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A concrete address removes a single device; BDADDR_ANY (the else
	 * branch below) clears all devices at once.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* BR/EDR devices live on the whitelist; a failed
			 * delete means the device was never added.
			 */
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled and explicit-connect entries are managed by the
		 * kernel itself and cannot be removed through mgmt.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from both the action list and the params list
		 * before freeing, then re-evaluate background scanning.
		 */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* With BDADDR_ANY the address type must be zero */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR whitelist ... */
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		/* ... and all LE connection parameters */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Entries with a pending explicit connect are kept
			 * (downgraded) so the connect can still complete.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5822
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count that still fits in a u16 total length,
	 * preventing overflow in the expected_len computation below.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared parameter count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Loading replaces the full set: drop disabled entries first */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types make sense for connection params;
		 * invalid entries are skipped rather than failing the
		 * whole load.
		 */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5907
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* External configuration may only change while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful for controllers declaring the external-config
	 * quirk.
	 */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Track whether the flag actually flipped so events are only sent
	 * on real changes.
	 */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches the UNCONFIGURED flag,
	 * the controller must move between the configured and unconfigured
	 * index lists (with a power-on cycle when becoming configured).
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5963
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The public address can only be set while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a set_bdaddr callback for this to work */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Remember whether the address really changed so that events are
	 * only emitted on actual changes.
	 */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may complete the configuration; if so,
	 * re-register the controller as a configured index and power it
	 * on to apply the address.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6015
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req(). Builds the EIR-encoded reply for
 * the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and additionally
 * broadcasts an OOB Data Updated event on success.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	/* Select the hash/randomizer pointers and the EIR length needed:
	 * each P-192/P-256 pair costs 18 bytes per value (16 bytes data +
	 * 2 byte EIR header) and the Class of Device field costs 5 bytes
	 * (3 bytes data + 2 byte header).
	 */
	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 values are available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 always, P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send an empty reply with just the error status */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Mark the requester as interested in OOB data updates and notify
	 * all other interested sockets about the new data.
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6126
6127static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6128				  struct mgmt_cp_read_local_oob_ext_data *cp)
6129{
6130	struct mgmt_pending_cmd *cmd;
6131	struct hci_request req;
6132	int err;
6133
6134	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6135			       cp, sizeof(*cp));
6136	if (!cmd)
6137		return -ENOMEM;
6138
6139	hci_req_init(&req, hdev);
6140
6141	if (bredr_sc_enabled(hdev))
6142		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6143	else
6144		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6145
6146	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6147	if (err < 0) {
6148		mgmt_pending_remove(cmd);
6149		return err;
6150	}
6151
6152	return 0;
6153}
6154
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: determine the status and an upper bound on the EIR
	 * length so the reply buffer can be sized before building it.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: build the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB data must come from the controller; the
			 * reply is sent asynchronously from the request's
			 * completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type: 0x01 = static/random,
		 * 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral-only, 0x01 = central-only role */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other interested sockets about the refreshed OOB data */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6310
6311static u32 get_supported_adv_flags(struct hci_dev *hdev)
6312{
6313	u32 flags = 0;
6314
6315	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6316	flags |= MGMT_ADV_FLAG_DISCOV;
6317	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6318	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6319	flags |= MGMT_ADV_FLAG_APPEARANCE;
6320	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6321
6322	/* In extended adv TX_POWER returned from Set Adv Param
6323	 * will be always valid.
6324	 */
6325	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6326	    ext_adv_capable(hdev))
6327		flags |= MGMT_ADV_FLAG_TX_POWER;
6328
6329	if (ext_adv_capable(hdev)) {
6330		flags |= MGMT_ADV_FLAG_SEC_1M;
6331
6332		if (hdev->le_features[1] & HCI_LE_PHY_2M)
6333			flags |= MGMT_ADV_FLAG_SEC_2M;
6334
6335		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6336			flags |= MGMT_ADV_FLAG_SEC_CODED;
6337	}
6338
6339	return flags;
6340}
6341
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* The reply carries one byte per configured advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill in the identifiers of all configured instances */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6390
6391static u8 calculate_name_len(struct hci_dev *hdev)
6392{
6393	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6394
6395	return append_local_name(hdev, buf, 0);
6396}
6397
6398static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6399			   bool is_adv_data)
6400{
6401	u8 max_len = HCI_MAX_AD_LENGTH;
6402
6403	if (is_adv_data) {
6404		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6405				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6406				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6407			max_len -= 3;
6408
6409		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6410			max_len -= 3;
6411	} else {
6412		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6413			max_len -= calculate_name_len(hdev);
6414
6415		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6416			max_len -= 4;
6417	}
6418
6419	return max_len;
6420}
6421
6422static bool flags_managed(u32 adv_flags)
6423{
6424	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6425			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6426			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6427}
6428
6429static bool tx_power_managed(u32 adv_flags)
6430{
6431	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6432}
6433
6434static bool name_managed(u32 adv_flags)
6435{
6436	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6437}
6438
6439static bool appearance_managed(u32 adv_flags)
6440{
6441	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6442}
6443
6444static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6445			      u8 len, bool is_adv_data)
6446{
6447	int i, cur_len;
6448	u8 max_len;
6449
6450	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6451
6452	if (len > max_len)
6453		return false;
6454
6455	/* Make sure that the data is correctly formatted. */
6456	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6457		cur_len = data[i];
6458
6459		if (data[i + 1] == EIR_FLAGS &&
6460		    (!is_adv_data || flags_managed(adv_flags)))
6461			return false;
6462
6463		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6464			return false;
6465
6466		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6467			return false;
6468
6469		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6470			return false;
6471
6472		if (data[i + 1] == EIR_APPEARANCE &&
6473		    appearance_managed(adv_flags))
6474			return false;
6475
6476		/* If the current field length would exceed the total data
6477		 * length, then it's invalid.
6478		 */
6479		if (i + cur_len >= len)
6480			return false;
6481	}
6482
6483	return true;
6484}
6485
/* HCI request completion handler for add_advertising(). On failure, every
 * instance still marked pending is rolled back and removed; on success the
 * pending flag is simply cleared. Finally the mgmt command is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer before tearing down the instance
		 * that is currently being advertised.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6537
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The command length must match its declared adv/scan_rsp sizes */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) clears the lowest set bit;
	 * a non-zero result means more than one SEC PHY was requested.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An instance timeout can only be enforced while powered on */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another command touching advertising/LE state is
	 * still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows the advertising data in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6680
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING.
 * Resolves the pending mgmt command, if one is still queued.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6710
/* Handler for MGMT_OP_REMOVE_ADVERTISING. Removes one advertising
 * instance (or all, when cp->instance is 0) and, if needed, queues the
 * HCI commands to stop advertising. Completion is either reported
 * immediately or deferred to remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising
	 * instance; zero means "remove all instances".
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Serialize against other commands that modify advertising state */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* Stop advertising altogether once the last instance is gone */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Completion is signalled from remove_advertising_complete() once
	 * the queued HCI commands have run.
	 */
	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6783
6784static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6785			     void *data, u16 data_len)
6786{
6787	struct mgmt_cp_get_adv_size_info *cp = data;
6788	struct mgmt_rp_get_adv_size_info rp;
6789	u32 flags, supported_flags;
6790	int err;
6791
6792	BT_DBG("%s", hdev->name);
6793
6794	if (!lmp_le_capable(hdev))
6795		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6796				       MGMT_STATUS_REJECTED);
6797
6798	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6799		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6800				       MGMT_STATUS_INVALID_PARAMS);
6801
6802	flags = __le32_to_cpu(cp->flags);
6803
6804	/* The current implementation only supports a subset of the specified
6805	 * flags.
6806	 */
6807	supported_flags = get_supported_adv_flags(hdev);
6808	if (flags & ~supported_flags)
6809		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6810				       MGMT_STATUS_INVALID_PARAMS);
6811
6812	rp.instance = cp->instance;
6813	rp.flags = cp->flags;
6814	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6815	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6816
6817	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6818				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6819
6820	return err;
6821}
6822
/* Dispatch table for incoming mgmt commands, indexed by opcode. Each
 * entry gives the handler, the expected parameter size and optional
 * flags (variable-length parameters, untrusted access, no controller
 * index required, unconfigured controllers allowed).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
};
6918
/* Announce a newly registered controller over the mgmt interface.
 * Sends both the legacy (Unconf) Index Added event and the Extended
 * Index Added event that carries the controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed through the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6950
/* Announce controller removal over the mgmt interface. For primary
 * controllers all still-pending commands are first completed with
 * MGMT_STATUS_INVALID_INDEX.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed through the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Abort every pending command for this index */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6985
6986/* This function requires the caller holds hdev->lock */
6987static void restart_le_actions(struct hci_dev *hdev)
6988{
6989	struct hci_conn_params *p;
6990
6991	list_for_each_entry(p, &hdev->le_conn_params, list) {
6992		/* Needed for AUTO_OFF case where might not "really"
6993		 * have been powered off.
6994		 */
6995		list_del_init(&p->action);
6996
6997		switch (p->auto_connect) {
6998		case HCI_AUTO_CONN_DIRECT:
6999		case HCI_AUTO_CONN_ALWAYS:
7000			list_add(&p->action, &hdev->pend_le_conns);
7001			break;
7002		case HCI_AUTO_CONN_REPORT:
7003			list_add(&p->action, &hdev->pend_le_reports);
7004			break;
7005		default:
7006			break;
7007		}
7008	}
7009}
7010
/* Called when powering on the controller has finished (err == 0 on
 * success). Responds to pending Set Powered commands and broadcasts
 * the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* Re-arm auto-connect actions and background scanning now
		 * that the controller is available again.
		 */
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* Answer all pending Set Powered commands; settings_rsp records
	 * one socket reference in match.sk to skip when broadcasting.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7033
/* Called while powering off the controller. Completes pending commands
 * and broadcasts the cleared state to mgmt listeners.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Answer pending Set Powered commands first */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* If a Class of Device was set, report it cleared */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7067
7068void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7069{
7070	struct mgmt_pending_cmd *cmd;
7071	u8 status;
7072
7073	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7074	if (!cmd)
7075		return;
7076
7077	if (err == -ERFKILL)
7078		status = MGMT_STATUS_RFKILLED;
7079	else
7080		status = MGMT_STATUS_FAILED;
7081
7082	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7083
7084	mgmt_pending_remove(cmd);
7085}
7086
7087void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7088		       bool persistent)
7089{
7090	struct mgmt_ev_new_link_key ev;
7091
7092	memset(&ev, 0, sizeof(ev));
7093
7094	ev.store_hint = persistent;
7095	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7096	ev.key.addr.type = BDADDR_BREDR;
7097	ev.key.type = key->type;
7098	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7099	ev.key.pin_len = key->pin_len;
7100
7101	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7102}
7103
7104static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7105{
7106	switch (ltk->type) {
7107	case SMP_LTK:
7108	case SMP_LTK_SLAVE:
7109		if (ltk->authenticated)
7110			return MGMT_LTK_AUTHENTICATED;
7111		return MGMT_LTK_UNAUTHENTICATED;
7112	case SMP_LTK_P256:
7113		if (ltk->authenticated)
7114			return MGMT_LTK_P256_AUTH;
7115		return MGMT_LTK_P256_UNAUTH;
7116	case SMP_LTK_P256_DEBUG:
7117		return MGMT_LTK_P256_DEBUG;
7118	}
7119
7120	return MGMT_LTK_UNAUTHENTICATED;
7121}
7122
/* Send the New Long Term Key event to userspace. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* The master flag is only set for SMP_LTK type keys */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7165
7166void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7167{
7168	struct mgmt_ev_new_irk ev;
7169
7170	memset(&ev, 0, sizeof(ev));
7171
7172	ev.store_hint = persistent;
7173
7174	bacpy(&ev.rpa, &irk->rpa);
7175	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7176	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7177	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7178
7179	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7180}
7181
/* Send the New Signature Resolving Key event to userspace. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7211
/* Send the New Connection Parameter event to userspace so it can
 * decide whether to store the parameters.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only reported for identity addresses */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7232
/* Send the Device Connected event, appending either the LE advertising
 * data or (for BR/EDR) the remote name and class of device as EIR.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR is assembled in a local
	 * buffer; 512 bytes is assumed sufficient for the appended
	 * fields -- NOTE(review): confirm against max adv/name sizes.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if one is set */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7269
7270static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7271{
7272	struct sock **sk = data;
7273
7274	cmd->cmd_complete(cmd, 0);
7275
7276	*sk = cmd->sk;
7277	sock_hold(*sk);
7278
7279	mgmt_pending_remove(cmd);
7280}
7281
/* mgmt_pending_foreach() callback: emit the Device Unpaired event and
 * complete the pending Unpair Device command with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7292
7293bool mgmt_powering_down(struct hci_dev *hdev)
7294{
7295	struct mgmt_pending_cmd *cmd;
7296	struct mgmt_mode *cp;
7297
7298	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7299	if (!cmd)
7300		return false;
7301
7302	cp = cmd->param;
7303	if (!cp->val)
7304		return true;
7305
7306	return false;
7307}
7308
/* Send the Device Disconnected event and resolve any pending
 * Disconnect/Unpair Device commands related to this link.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections that were announced to userspace generate a
	 * Device Disconnected event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect command; its socket (if any)
	 * is skipped when sending the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7344
7345void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7346			    u8 link_type, u8 addr_type, u8 status)
7347{
7348	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7349	struct mgmt_cp_disconnect *cp;
7350	struct mgmt_pending_cmd *cmd;
7351
7352	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7353			     hdev);
7354
7355	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7356	if (!cmd)
7357		return;
7358
7359	cp = cmd->param;
7360
7361	if (bacmp(bdaddr, &cp->addr.bdaddr))
7362		return;
7363
7364	if (cp->addr.type != bdaddr_type)
7365		return;
7366
7367	cmd->cmd_complete(cmd, mgmt_status(status));
7368	mgmt_pending_remove(cmd);
7369}
7370
/* Send the Connect Failed event with the translated HCI status. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7390
7391void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7392{
7393	struct mgmt_ev_pin_code_request ev;
7394
7395	bacpy(&ev.addr.bdaddr, bdaddr);
7396	ev.addr.type = BDADDR_BREDR;
7397	ev.secure = secure;
7398
7399	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7400}
7401
7402void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7403				  u8 status)
7404{
7405	struct mgmt_pending_cmd *cmd;
7406
7407	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7408	if (!cmd)
7409		return;
7410
7411	cmd->cmd_complete(cmd, mgmt_status(status));
7412	mgmt_pending_remove(cmd);
7413}
7414
7415void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7416				      u8 status)
7417{
7418	struct mgmt_pending_cmd *cmd;
7419
7420	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7421	if (!cmd)
7422		return;
7423
7424	cmd->cmd_complete(cmd, mgmt_status(status));
7425	mgmt_pending_remove(cmd);
7426}
7427
/* Send the User Confirmation Request event asking userspace to confirm
 * the pairing; value carries the numeric comparison value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* confirm_hint is passed through to userspace unmodified */
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7444
7445int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7446			      u8 link_type, u8 addr_type)
7447{
7448	struct mgmt_ev_user_passkey_request ev;
7449
7450	BT_DBG("%s", hdev->name);
7451
7452	bacpy(&ev.addr.bdaddr, bdaddr);
7453	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7454
7455	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7456			  NULL);
7457}
7458
/* Common helper to resolve a pending user pairing response command
 * (confirm/passkey reply and their negative variants) identified by
 * opcode. Returns -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
7474
/* Resolve a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

/* Resolve a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

/* Resolve a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

/* Resolve a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7504
7505int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7506			     u8 link_type, u8 addr_type, u32 passkey,
7507			     u8 entered)
7508{
7509	struct mgmt_ev_passkey_notify ev;
7510
7511	BT_DBG("%s", hdev->name);
7512
7513	bacpy(&ev.addr.bdaddr, bdaddr);
7514	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7515	ev.passkey = __cpu_to_le32(passkey);
7516	ev.entered = entered;
7517
7518	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7519}
7520
/* Send the Authentication Failed event and complete the pending
 * pairing command for this connection, if one exists.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that initiated the pairing when broadcasting */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7541
/* Completion handler for changing link-level authentication. Syncs the
 * HCI_LINK_SECURITY flag with the controller state and resolves
 * pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	/* On failure just report the error to all pending commands */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the HCI_AUTH state into the mgmt flag and record
	 * whether this was an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7568
/* Queue a Write EIR command that clears the extended inquiry response
 * data, and wipe the locally cached copy.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	/* Controllers without extended inquiry support have no EIR */
	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
7583
/* Completion handler for toggling Secure Simple Pairing. Syncs the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED flags, resolves pending Set SSP
 * commands, and updates or clears the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable means SSP is still off; if the flag was
		 * already set, revert it (and the dependent HS flag) and
		 * announce the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed support */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Keep the EIR data in sync with the new SSP state */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7636
7637static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7638{
7639	struct cmd_lookup *match = data;
7640
7641	if (match->sk == NULL) {
7642		match->sk = cmd->sk;
7643		sock_hold(match->sk);
7644	}
7645}
7646
/* Completion handler for class-of-device updates: answer any pending
 * SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command and, on success,
 * broadcast the new class of device.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* dev_class is the 3-byte Class of Device value */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7665
/* Completion handler for a local name change. On success the new name
 * is broadcast, unless the change was part of powering on the adapter
 * in which case no event is sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change came from the
		 * HCI side; update the cached name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7693
7694static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7695{
7696	int i;
7697
7698	for (i = 0; i < uuid_count; i++) {
7699		if (!memcmp(uuid, uuids[i], 16))
7700			return true;
7701	}
7702
7703	return false;
7704}
7705
/* Walk the EIR/advertising data in @eir (length @eir_len) and return
 * true if any UUID16/UUID32/UUID128 field contains a UUID present in
 * the @uuids filter list. 16- and 32-bit UUIDs are expanded to full
 * 128-bit form via the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length byte excludes itself */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past
		 * the end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2 bytes per entry starting at eir[2] (after
			 * the length and type bytes); eir[i + 2] is the
			 * low byte, placed at base-UUID offset 12.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4 bytes per entry, low byte first */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 16 bytes per entry, copied verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte plus payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7760
/* Schedule a restart of an ongoing LE scan so that devices suppressed
 * by the controller's duplicate filtering get reported again with
 * fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the scan would end on its own before
	 * the restart delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7775
/* Apply the active service discovery filter (RSSI threshold and UUID
 * list) to a discovered device and return true if the result should
 * be reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7820
/* Send a MGMT_EV_DEVICE_FOUND event for a device found via inquiry or
 * LE scanning, after applying any active discovery filters (RSSI
 * threshold, UUID list, limited discoverable mode).
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device, unless the EIR data already
	 * carries one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7902
/* Report a resolved remote device name as a MGMT_EV_DEVICE_FOUND
 * event carrying a single EIR_NAME_COMPLETE field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* +2 covers the EIR length and type bytes of the name field */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
7925
7926void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7927{
7928	struct mgmt_ev_discovering ev;
7929
7930	BT_DBG("%s discovering %u", hdev->name, discovering);
7931
7932	memset(&ev, 0, sizeof(ev));
7933	ev.type = hdev->discovery.type;
7934	ev.discovering = discovering;
7935
7936	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7937}
7938
/* Registration data for the HCI control channel: its command handler
 * table and the per-hdev init hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
7945
/* Register the management interface on the HCI control channel */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7950
/* Unregister the management interface from the control channel */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
v5.9
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2010  Nokia Corporation
   5   Copyright (C) 2011-2012 Intel Corporation
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI Management interface */
  26
  27#include <linux/module.h>
  28#include <asm/unaligned.h>
  29
  30#include <net/bluetooth/bluetooth.h>
  31#include <net/bluetooth/hci_core.h>
  32#include <net/bluetooth/hci_sock.h>
  33#include <net/bluetooth/l2cap.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "hci_request.h"
  37#include "smp.h"
  38#include "mgmt_util.h"
  39#include "mgmt_config.h"
  40#include "msft.h"
  41
  42#define MGMT_VERSION	1
  43#define MGMT_REVISION	18
  44
/* Commands accepted from trusted control channel sockets */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};

/* Events that may be delivered to trusted control channel sockets */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
};

/* Subset of commands that untrusted sockets may issue (read-only) */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

/* Subset of events delivered to untrusted sockets */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
 197
 198#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
 199
 200#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
 201		 "\x00\x00\x00\x00\x00\x00\x00\x00"
 202
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code. Codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
 267
 268static u8 mgmt_status(u8 hci_status)
 269{
 270	if (hci_status < ARRAY_SIZE(mgmt_status_table))
 271		return mgmt_status_table[hci_status];
 272
 273	return MGMT_STATUS_FAILED;
 274}
 275
/* Send a controller index related event to all control channel
 * sockets that have @flag set.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
 282
/* Send an event to control channel sockets with @flag set, except the
 * socket given in @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
 289
/* Send an event to all trusted control channel sockets, optionally
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
 296
 297static u8 le_addr_type(u8 mgmt_addr_type)
 298{
 299	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
 300		return ADDR_LE_DEV_PUBLIC;
 301	else
 302		return ADDR_LE_DEV_RANDOM;
 303}
 304
/* Fill a mgmt_rp_read_version reply with the management interface
 * version and (little endian) revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
 312
/* MGMT_OP_READ_VERSION: report the mgmt interface version/revision */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
 325
/* MGMT_OP_READ_COMMANDS: list the supported commands and events. The
 * reported set depends on whether the requesting socket is trusted.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* The opcode list follows the fixed-size reply header */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events, as unaligned little endian u16 */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
 377
/* MGMT_OP_READ_INDEX_LIST: return the indexes of all configured
 * primary controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers not yet available to mgmt, so the final count
	 * may be smaller than the allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
 437
/* MGMT_OP_READ_UNCONF_INDEX_LIST: return the indexes of all primary
 * controllers that are still unconfigured.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers not yet available to mgmt.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
 497
/* MGMT_OP_READ_EXT_INDEX_LIST: return all primary and AMP controller
 * indexes together with their type and bus. Calling this switches the
 * socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in index/type/bus for each usable
	 * controller; entries not yet available to mgmt are skipped.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
 571
 572static bool is_configured(struct hci_dev *hdev)
 573{
 574	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
 575	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
 576		return false;
 577
 578	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
 579	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
 580	    !bacmp(&hdev->public_addr, BDADDR_ANY))
 581		return false;
 582
 583	return true;
 584}
 585
/* Return (as a little endian bitmask) the configuration options that
 * still have to be provided before the controller is fully
 * configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet performed */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address is required but none has been set yet */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
 601
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping the socket in @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
 609
/* Complete @opcode with the currently missing configuration options */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
 617
/* MGMT_OP_READ_CONFIG_INFO: report the manufacturer id plus the
 * supported and currently missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
 645
/* Build the bitmask of PHYs the controller hardware supports, derived
 * from the BR/EDR LMP feature bits and the LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3-slot/5-slot support is advertised separately
		 * from the 2M/3M modulation support.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
 697
 698static u32 get_selected_phys(struct hci_dev *hdev)
 699{
 700	u32 selected_phys = 0;
 701
 702	if (lmp_bredr_capable(hdev)) {
 703		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
 704
 705		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
 706			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
 707
 708		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
 709			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
 710
 711		if (lmp_edr_2m_capable(hdev)) {
 712			if (!(hdev->pkt_type & HCI_2DH1))
 713				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
 714
 715			if (lmp_edr_3slot_capable(hdev) &&
 716			    !(hdev->pkt_type & HCI_2DH3))
 717				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
 718
 719			if (lmp_edr_5slot_capable(hdev) &&
 720			    !(hdev->pkt_type & HCI_2DH5))
 721				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
 722
 723			if (lmp_edr_3m_capable(hdev)) {
 724				if (!(hdev->pkt_type & HCI_3DH1))
 725					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
 726
 727				if (lmp_edr_3slot_capable(hdev) &&
 728				    !(hdev->pkt_type & HCI_3DH3))
 729					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
 730
 731				if (lmp_edr_5slot_capable(hdev) &&
 732				    !(hdev->pkt_type & HCI_3DH5))
 733					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
 734			}
 735		}
 736	}
 737
 738	if (lmp_le_capable(hdev)) {
 739		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
 740			selected_phys |= MGMT_PHY_LE_1M_TX;
 741
 742		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
 743			selected_phys |= MGMT_PHY_LE_1M_RX;
 744
 745		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
 746			selected_phys |= MGMT_PHY_LE_2M_TX;
 747
 748		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
 749			selected_phys |= MGMT_PHY_LE_2M_RX;
 750
 751		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
 752			selected_phys |= MGMT_PHY_LE_CODED_TX;
 753
 754		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
 755			selected_phys |= MGMT_PHY_LE_CODED_RX;
 756	}
 757
 758	return selected_phys;
 759}
 760
 761static u32 get_configurable_phys(struct hci_dev *hdev)
 762{
 763	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
 764		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
 765}
 766
 767static u32 get_supported_settings(struct hci_dev *hdev)
 768{
 769	u32 settings = 0;
 770
 771	settings |= MGMT_SETTING_POWERED;
 772	settings |= MGMT_SETTING_BONDABLE;
 773	settings |= MGMT_SETTING_DEBUG_KEYS;
 774	settings |= MGMT_SETTING_CONNECTABLE;
 775	settings |= MGMT_SETTING_DISCOVERABLE;
 776
 777	if (lmp_bredr_capable(hdev)) {
 778		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
 779			settings |= MGMT_SETTING_FAST_CONNECTABLE;
 780		settings |= MGMT_SETTING_BREDR;
 781		settings |= MGMT_SETTING_LINK_SECURITY;
 782
 783		if (lmp_ssp_capable(hdev)) {
 784			settings |= MGMT_SETTING_SSP;
 785			settings |= MGMT_SETTING_HS;
 786		}
 787
 788		if (lmp_sc_capable(hdev))
 789			settings |= MGMT_SETTING_SECURE_CONN;
 790
 791		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
 792			     &hdev->quirks))
 793			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
 794	}
 795
 796	if (lmp_le_capable(hdev)) {
 797		settings |= MGMT_SETTING_LE;
 
 798		settings |= MGMT_SETTING_SECURE_CONN;
 799		settings |= MGMT_SETTING_PRIVACY;
 800		settings |= MGMT_SETTING_STATIC_ADDRESS;
 801
 802		/* When the experimental feature for LL Privacy support is
 803		 * enabled, then advertising is no longer supported.
 804		 */
 805		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
 806			settings |= MGMT_SETTING_ADVERTISING;
 807	}
 808
 809	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
 810	    hdev->set_bdaddr)
 811		settings |= MGMT_SETTING_CONFIGURATION;
 812
 813	settings |= MGMT_SETTING_PHY_CONFIGURATION;
 814
 815	return settings;
 816}
 817
 818static u32 get_current_settings(struct hci_dev *hdev)
 819{
 820	u32 settings = 0;
 821
 822	if (hdev_is_powered(hdev))
 823		settings |= MGMT_SETTING_POWERED;
 824
 825	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
 826		settings |= MGMT_SETTING_CONNECTABLE;
 827
 828	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
 829		settings |= MGMT_SETTING_FAST_CONNECTABLE;
 830
 831	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
 832		settings |= MGMT_SETTING_DISCOVERABLE;
 833
 834	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
 835		settings |= MGMT_SETTING_BONDABLE;
 836
 837	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 838		settings |= MGMT_SETTING_BREDR;
 839
 840	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 841		settings |= MGMT_SETTING_LE;
 842
 843	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
 844		settings |= MGMT_SETTING_LINK_SECURITY;
 845
 846	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
 847		settings |= MGMT_SETTING_SSP;
 848
 849	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
 850		settings |= MGMT_SETTING_HS;
 851
 852	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
 853		settings |= MGMT_SETTING_ADVERTISING;
 854
 855	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
 856		settings |= MGMT_SETTING_SECURE_CONN;
 857
 858	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
 859		settings |= MGMT_SETTING_DEBUG_KEYS;
 860
 861	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
 862		settings |= MGMT_SETTING_PRIVACY;
 863
 864	/* The current setting for static address has two purposes. The
 865	 * first is to indicate if the static address will be used and
 866	 * the second is to indicate if it is actually set.
 867	 *
 868	 * This means if the static address is not configured, this flag
 869	 * will never be set. If the address is configured, then if the
 870	 * address is actually used decides if the flag is set or not.
 871	 *
 872	 * For single mode LE only controllers and dual-mode controllers
 873	 * with BR/EDR disabled, the existence of the static address will
 874	 * be evaluated.
 875	 */
 876	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
 877	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
 878	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
 879		if (bacmp(&hdev->static_addr, BDADDR_ANY))
 880			settings |= MGMT_SETTING_STATIC_ADDRESS;
 881	}
 882
 883	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
 884		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
 885
 886	return settings;
 887}
 888
 889static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
 890{
 891	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
 892}
 893
 894static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
 895						  struct hci_dev *hdev,
 896						  const void *data)
 897{
 898	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
 899}
 900
 901u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
 902{
 903	struct mgmt_pending_cmd *cmd;
 904
 905	/* If there's a pending mgmt command the flags will not yet have
 906	 * their final values, so check for this first.
 907	 */
 908	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
 909	if (cmd) {
 910		struct mgmt_mode *cp = cmd->param;
 911		if (cp->val == 0x01)
 912			return LE_AD_GENERAL;
 913		else if (cp->val == 0x02)
 914			return LE_AD_LIMITED;
 915	} else {
 916		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
 917			return LE_AD_LIMITED;
 918		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
 919			return LE_AD_GENERAL;
 920	}
 921
 922	return 0;
 923}
 924
 925bool mgmt_get_connectable(struct hci_dev *hdev)
 926{
 927	struct mgmt_pending_cmd *cmd;
 928
 929	/* If there's a pending mgmt command the flag will not yet have
 930	 * it's final value, so check for this first.
 931	 */
 932	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
 933	if (cmd) {
 934		struct mgmt_mode *cp = cmd->param;
 935
 936		return cp->val;
 937	}
 938
 939	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
 940}
 941
/* Delayed work run when the service cache timeout expires. While
 * HCI_SERVICE_CACHE is set, UUID/class changes are only cached;
 * clearing it here syncs the EIR data and class of device with the
 * controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if caching was not (or no longer is) active. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the request outside of the device lock. */
	hci_req_run(&req, NULL);
}
 962
/* Delayed work run when the Resolvable Private Address lifetime
 * expires. Marks the RPA as expired and, if advertising is active,
 * restarts it so a fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Without active advertising there is nothing to refresh now;
	 * the new RPA will be picked up whenever advertising starts.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
 987
/* One-time per-device initialization when the first mgmt command is
 * received for a controller. Safe to call repeatedly: the HCI_MGMT
 * flag guards against double initialization.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1003
/* Handler for MGMT_OP_READ_INFO: reply with the controller's basic
 * information (address, version, settings, class, names).
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1033
/* Append class of device, appearance and device names as EIR
 * structures to @eir and return the total number of bytes written.
 * NOTE(review): the caller must provide a buffer large enough for
 * all appended fields; no bound is passed in here.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device only makes sense with BR/EDR enabled. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is an LE-only concept. */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1057
/* Handler for MGMT_OP_READ_EXT_INFO: like Read Info but with the
 * variable-length data packed as EIR structures. Using this command
 * also switches the socket over to the extended info event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes covers the fixed header plus the maximum EIR data
	 * appended below (class, appearance and both names).
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1097
/* Emit an Extended Controller Information Changed event to every
 * mgmt socket that opted into it (except @skip).
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Same sizing rationale as in read_ext_controller_info(). */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1113
1114static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1115{
1116	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1117
1118	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1119				 sizeof(settings));
1120}
1121
/* Request-complete callback for clean_up_hci_state(): once no
 * connections remain, power off immediately instead of waiting for
 * the delayed power_off work.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1131
1132void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1133{
1134	struct mgmt_ev_advertising_added ev;
1135
1136	ev.instance = instance;
1137
1138	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1139}
1140
1141void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1142			      u8 instance)
1143{
1144	struct mgmt_ev_advertising_removed ev;
1145
1146	ev.instance = instance;
1147
1148	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1149}
1150
1151static void cancel_adv_timeout(struct hci_dev *hdev)
1152{
1153	if (hdev->adv_instance_timeout) {
1154		hdev->adv_instance_timeout = 0;
1155		cancel_delayed_work(&hdev->adv_instance_expire);
1156	}
1157}
1158
/* Prepare the controller for power off: disable scanning and
 * advertising, stop discovery and abort every connection. Returns
 * the hci_req_run() result (-ENODATA when nothing was queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances without notifying userspace. */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1192
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * The actual state change happens asynchronously on req_workqueue;
 * the final response is sent from the power on/off completion path.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already matches: reply immediately. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			/* Nothing to clean up: power off right away. */
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1247
1248static int new_settings(struct hci_dev *hdev, struct sock *skip)
1249{
1250	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1251
1252	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1253				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1254}
1255
1256int mgmt_new_settings(struct hci_dev *hdev)
1257{
1258	return new_settings(hdev, NULL);
1259}
1260
/* Context passed to mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (held) */
	struct hci_dev *hdev;	/* controller the commands belong to */
	u8 mgmt_status;		/* status to report, where used */
};
1266
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free it. The first socket seen is stashed in
 * the lookup (with a reference) so the caller can skip it when
 * broadcasting the subsequent New Settings event.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Detach before freeing; the foreach iteration relies on it. */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1282
1283static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284{
1285	u8 *status = data;
1286
1287	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1288	mgmt_pending_remove(cmd);
1289}
1290
1291static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1292{
1293	if (cmd->cmd_complete) {
1294		u8 *status = data;
1295
1296		cmd->cmd_complete(cmd, *status);
1297		mgmt_pending_remove(cmd);
1298
1299		return;
1300	}
1301
1302	cmd_status_rsp(cmd, data);
1303}
1304
1305static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1306{
1307	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1308				 cmd->param, cmd->param_len);
1309}
1310
1311static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1312{
1313	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1314				 cmd->param, sizeof(struct mgmt_addr_info));
1315}
1316
1317static u8 mgmt_bredr_support(struct hci_dev *hdev)
1318{
1319	if (!lmp_bredr_capable(hdev))
1320		return MGMT_STATUS_NOT_SUPPORTED;
1321	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1322		return MGMT_STATUS_REJECTED;
1323	else
1324		return MGMT_STATUS_SUCCESS;
1325}
1326
1327static u8 mgmt_le_support(struct hci_dev *hdev)
1328{
1329	if (!lmp_le_capable(hdev))
1330		return MGMT_STATUS_NOT_SUPPORTED;
1331	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1332		return MGMT_STATUS_REJECTED;
1333	else
1334		return MGMT_STATUS_SUCCESS;
1335}
1336
/* Completion handler for the asynchronous Set Discoverable update.
 * Finishes the pending mgmt command, arms the discoverable timeout
 * when one was requested, and broadcasts New Settings on success.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistically set limited flag. */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout only now that the mode change succeeded. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1371
/* Handler for MGMT_OP_SET_DISCOVERABLE: values 0x00 (off), 0x01
 * (general) and 0x02 (limited, which requires a timeout). The HCI
 * side of the change runs asynchronously via discoverable_update;
 * the response is sent from mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1501
/* Completion handler for the asynchronous Set Connectable update.
 * Finishes the pending mgmt command and broadcasts New Settings on
 * success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1529
1530static int set_connectable_update_settings(struct hci_dev *hdev,
1531					   struct sock *sk, u8 val)
1532{
1533	bool changed = false;
1534	int err;
1535
1536	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1537		changed = true;
1538
1539	if (val) {
1540		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1541	} else {
1542		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1543		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1544	}
1545
1546	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1547	if (err < 0)
1548		return err;
1549
1550	if (changed) {
1551		hci_req_update_scan(hdev);
1552		hci_update_background_scan(hdev);
1553		return new_settings(hdev, sk);
1554	}
1555
1556	return 0;
1557}
1558
/* Handler for MGMT_OP_SET_CONNECTABLE. When powered, the HCI side
 * runs asynchronously via connectable_update and the response is
 * sent from mgmt_set_connectable_complete(); when powered off only
 * the flags are updated.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: a flags-only update suffices. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverability. */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1615
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * No HCI traffic is needed, but in limited privacy mode a change may
 * require refreshing the advertising address.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1658
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link
 * level security (authentication). When powered, this sends Write
 * Authentication Enable and the response follows from the command
 * complete path; when powered off only the flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: no HCI command. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1727
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * When powered, Write Simple Pairing Mode is sent and the response
 * follows from the command complete path; when powered off only the
 * flags are toggled. Disabling SSP also disables High Speed.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS depends on SSP, so clear it as well and
			 * report a change if either flag flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Debug keys only make sense with SSP enabled, so turn the
	 * debug mode off first when SSP is being disabled.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1808
1809static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1810{
1811	struct mgmt_mode *cp = data;
1812	bool changed;
1813	u8 status;
1814	int err;
1815
1816	bt_dev_dbg(hdev, "sock %p", sk);
1817
1818	status = mgmt_bredr_support(hdev);
1819	if (status)
1820		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1821
1822	if (!lmp_ssp_capable(hdev))
1823		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1824				       MGMT_STATUS_NOT_SUPPORTED);
1825
1826	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1827		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1828				       MGMT_STATUS_REJECTED);
1829
1830	if (cp->val != 0x00 && cp->val != 0x01)
1831		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1832				       MGMT_STATUS_INVALID_PARAMS);
1833
1834	hci_dev_lock(hdev);
1835
1836	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1837		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1838				      MGMT_STATUS_BUSY);
1839		goto unlock;
1840	}
1841
1842	if (cp->val) {
1843		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1844	} else {
1845		if (hdev_is_powered(hdev)) {
1846			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847					      MGMT_STATUS_REJECTED);
1848			goto unlock;
1849		}
1850
1851		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1852	}
1853
1854	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1855	if (err < 0)
1856		goto unlock;
1857
1858	if (changed)
1859		err = new_settings(hdev, sk);
1860
1861unlock:
1862	hci_dev_unlock(hdev);
1863	return err;
1864}
1865
/* HCI request completion callback for MGMT_OP_SET_LE.
 *
 * On failure every pending SET_LE command is answered with the mapped
 * error status. On success all pending commands get the new settings
 * and a New Settings event is broadcast. When LE ended up enabled, the
 * advertising/scan response data is refreshed so the controller has
 * sane defaults (during power-on this is instead handled by
 * powered_update_hci, see comment below).
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp stores the last notified socket in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			/* Extended advertising: (re)configure instance 0
			 * before updating its scan response data.
			 */
			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1912
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * For LE-only configurations disabling LE is rejected (enabling again
 * is a graceful no-op). When the controller is powered down, or the
 * host LE support already matches the request, only the flags are
 * updated. Otherwise HCI_OP_WRITE_LE_HOST_SUPPORTED is issued and the
 * result handled in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE invalidates all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered down, or host LE support already matches the request:
	 * only adjust the flags and notify, no HCI traffic needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also touches the LE state, so refuse to race */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Before turning host LE support off, stop any ongoing
		 * advertising and clear extended advertising sets.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2019
2020/* This is a helper function to test for pending mgmt commands that can
2021 * cause CoD or EIR HCI commands. We can only allow one such pending
2022 * mgmt command at a time since otherwise we cannot easily track what
2023 * the current values are, will be, and based on that calculate if a new
2024 * HCI command needs to be sent and if yes with what value.
2025 */
2026static bool pending_eir_or_class(struct hci_dev *hdev)
2027{
2028	struct mgmt_pending_cmd *cmd;
2029
2030	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2031		switch (cmd->opcode) {
2032		case MGMT_OP_ADD_UUID:
2033		case MGMT_OP_REMOVE_UUID:
2034		case MGMT_OP_SET_DEV_CLASS:
2035		case MGMT_OP_SET_POWERED:
2036			return true;
2037		}
2038	}
2039
2040	return false;
2041}
2042
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16-bit and 32-bit UUIDs are shortened
 * aliases of this base value (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2047
2048static u8 get_uuid_size(const u8 *uuid)
2049{
2050	u32 val;
2051
2052	if (memcmp(uuid, bluetooth_base_uuid, 12))
2053		return 128;
2054
2055	val = get_unaligned_le32(&uuid[12]);
2056	if (val > 0xffff)
2057		return 32;
2058
2059	return 16;
2060}
2061
2062static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2063{
2064	struct mgmt_pending_cmd *cmd;
2065
2066	hci_dev_lock(hdev);
2067
2068	cmd = pending_find(mgmt_op, hdev);
2069	if (!cmd)
2070		goto unlock;
2071
2072	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2073			  mgmt_status(status), hdev->dev_class, 3);
2074
2075	mgmt_pending_remove(cmd);
2076
2077unlock:
2078	hci_dev_unlock(hdev);
2079}
2080
2081static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2082{
2083	bt_dev_dbg(hdev, "status 0x%02x", status);
2084
2085	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2086}
2087
/* MGMT_OP_ADD_UUID handler: register a service UUID and update the
 * Class of Device and EIR data accordingly.
 *
 * Refused while another CoD/EIR-affecting command is pending. When the
 * resulting HCI request is empty (-ENODATA, e.g. nothing to update
 * while powered off) the command is completed immediately with the
 * current device class.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* Cache the shortest representation for later EIR generation */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued, so there
		 * is nothing to wait for and we can respond right away.
		 */
		if (err != -ENODATA)
			goto failed;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2145
2146static bool enable_service_cache(struct hci_dev *hdev)
2147{
2148	if (!hdev_is_powered(hdev))
2149		return false;
2150
2151	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2152		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2153				   CACHE_TIMEOUT);
2154		return true;
2155	}
2156
2157	return false;
2158}
2159
2160static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2161{
2162	bt_dev_dbg(hdev, "status 0x%02x", status);
2163
2164	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2165}
2166
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID, or all of them
 * when the all-zero wildcard UUID is given, then refresh the Class of
 * Device and EIR data.
 *
 * For the wildcard case the update may be deferred through the service
 * cache; otherwise an empty HCI request (-ENODATA) completes the
 * command immediately with the current device class.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: drop every registered UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache absorbed the change, the actual
		 * CoD/EIR update happens later from the cache timeout.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands queued, respond immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2245
2246static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2247{
2248	bt_dev_dbg(hdev, "status 0x%02x", status);
2249
2250	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2251}
2252
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class.
 *
 * The lower two bits of minor and the upper three bits of major are
 * reserved and must be zero. While powered off only the stored values
 * are updated; otherwise an HCI request updates class (and EIR, when
 * the service cache was active) on the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits in the Class of Device must not be set */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* The lock is dropped around cancel_delayed_work_sync()
		 * here — NOTE(review): presumably because the cached
		 * service work may itself take the hdev lock; confirm
		 * against the service_cache work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands queued, respond immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2323
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the set of stored BR/EDR
 * link keys with the ones supplied by userspace.
 *
 * The full key list is validated (count bound, exact message length,
 * address type and key type per entry) before the existing keys are
 * cleared. Blocked keys and debug combination keys are skipped when
 * storing. Always responds with success once validation passed.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message must be exactly header plus key_count entries */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the administratively blocked list are dropped */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2412
2413static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2414			   u8 addr_type, struct sock *skip_sk)
2415{
2416	struct mgmt_ev_device_unpaired ev;
2417
2418	bacpy(&ev.addr.bdaddr, bdaddr);
2419	ev.addr.type = addr_type;
2420
2421	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2422			  skip_sk);
2423}
2424
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing data for a device
 * and optionally disconnect it.
 *
 * For BR/EDR the stored link key is removed; for LE any ongoing SMP
 * pairing is aborted and LTK/IRK removed. When cp->disconnect is set
 * and a connection exists, the link is terminated and the command is
 * completed from the pending command once the disconnect finishes;
 * otherwise the response and the Device Unpaired event are sent
 * immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Without a connection, the parameters can be removed right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2552
/* MGMT_OP_DISCONNECT handler: terminate a BR/EDR or LE connection.
 *
 * The command completes asynchronously via the pending command once
 * the disconnect finishes. Only one DISCONNECT may be in flight at a
 * time; a device that is not in a connected state yields
 * MGMT_STATUS_NOT_CONNECTED.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2618
2619static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2620{
2621	switch (link_type) {
2622	case LE_LINK:
2623		switch (addr_type) {
2624		case ADDR_LE_DEV_PUBLIC:
2625			return BDADDR_LE_PUBLIC;
2626
2627		default:
2628			/* Fallback to LE Random address type */
2629			return BDADDR_LE_RANDOM;
2630		}
2631
2632	default:
2633		/* Fallback to BR/EDR type */
2634		return BDADDR_BREDR;
2635	}
2636}
2637
2638static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2639			   u16 data_len)
2640{
2641	struct mgmt_rp_get_connections *rp;
2642	struct hci_conn *c;
2643	int err;
2644	u16 i;
2645
2646	bt_dev_dbg(hdev, "sock %p", sk);
2647
2648	hci_dev_lock(hdev);
2649
2650	if (!hdev_is_powered(hdev)) {
2651		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2652				      MGMT_STATUS_NOT_POWERED);
2653		goto unlock;
2654	}
2655
2656	i = 0;
2657	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2658		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2659			i++;
2660	}
2661
2662	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2663	if (!rp) {
2664		err = -ENOMEM;
2665		goto unlock;
2666	}
2667
2668	i = 0;
2669	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2670		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2671			continue;
2672		bacpy(&rp->addr[i].bdaddr, &c->dst);
2673		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2674		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2675			continue;
2676		i++;
2677	}
2678
2679	rp->conn_count = cpu_to_le16(i);
2680
2681	/* Recalculate length in case of filtered SCO connections, etc */
2682	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2683				struct_size(rp, addr, i));
2684
2685	kfree(rp);
2686
2687unlock:
2688	hci_dev_unlock(hdev);
2689	return err;
2690}
2691
2692static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2693				   struct mgmt_cp_pin_code_neg_reply *cp)
2694{
2695	struct mgmt_pending_cmd *cmd;
2696	int err;
2697
2698	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2699			       sizeof(*cp));
2700	if (!cmd)
2701		return -ENOMEM;
2702
2703	cmd->cmd_complete = addr_cmd_complete;
2704
2705	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2706			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2707	if (err < 0)
2708		mgmt_pending_remove(cmd);
2709
2710	return err;
2711}
2712
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user supplied PIN code to
 * the controller for an ongoing BR/EDR authentication.
 *
 * If high security is required the PIN must be 16 bytes; a shorter PIN
 * is automatically turned into a negative reply and the command fails
 * with MGMT_STATUS_INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security mandates a full 16-byte PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2774
2775static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2776			     u16 len)
2777{
2778	struct mgmt_cp_set_io_capability *cp = data;
2779
2780	bt_dev_dbg(hdev, "sock %p", sk);
2781
2782	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2783		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2784				       MGMT_STATUS_INVALID_PARAMS);
2785
2786	hci_dev_lock(hdev);
2787
2788	hdev->io_capability = cp->io_capability;
2789
2790	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
 
2791
2792	hci_dev_unlock(hdev);
2793
2794	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2795				 NULL, 0);
2796}
2797
2798static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2799{
2800	struct hci_dev *hdev = conn->hdev;
2801	struct mgmt_pending_cmd *cmd;
2802
2803	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2804		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2805			continue;
2806
2807		if (cmd->user_data != conn)
2808			continue;
2809
2810		return cmd;
2811	}
2812
2813	return NULL;
2814}
2815
/* Finish a Pair Device command: send the result to the requester,
 * detach the pairing callbacks from the connection and release the
 * references held while pairing was in progress (hci_conn_drop() pairs
 * with the connect-time hold, hci_conn_put() with the hci_conn_get()
 * taken when the pending command was set up).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2844
2845void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2846{
2847	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2848	struct mgmt_pending_cmd *cmd;
2849
2850	cmd = find_pairing(conn);
2851	if (cmd) {
2852		cmd->cmd_complete(cmd, status);
2853		mgmt_pending_remove(cmd);
2854	}
2855}
2856
2857static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2858{
2859	struct mgmt_pending_cmd *cmd;
2860
2861	BT_DBG("status %u", status);
2862
2863	cmd = find_pairing(conn);
2864	if (!cmd) {
2865		BT_DBG("Unable to find a pending command");
2866		return;
2867	}
2868
2869	cmd->cmd_complete(cmd, mgmt_status(status));
2870	mgmt_pending_remove(cmd);
2871}
2872
2873static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2874{
2875	struct mgmt_pending_cmd *cmd;
2876
2877	BT_DBG("status %u", status);
2878
2879	if (!status)
2880		return;
2881
2882	cmd = find_pairing(conn);
2883	if (!cmd) {
2884		BT_DBG("Unable to find a pending command");
2885		return;
2886	}
2887
2888	cmd->cmd_complete(cmd, mgmt_status(status));
2889	mgmt_pending_remove(cmd);
2890}
2891
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing (dedicated bonding)
 * with a remote BR/EDR or LE device.
 *
 * Creates (or reuses) a connection, installs pairing callbacks on it
 * and tracks the operation as a pending command that is resolved from
 * those callbacks (or from mgmt_smp_complete() for LE). The pending
 * command holds a reference to the connection via hci_conn_get(),
 * released in pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means some other pairing is using
	 * this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* An already connected and secure link completes immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3022
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort a pairing that was started
 * by a still-pending MGMT_OP_PAIR_DEVICE command for the same remote
 * address.
 *
 * Returns 0 or a negative errno; the mgmt status/complete reply is
 * delivered to @sk.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing can be pending while the adapter is powered down */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an outstanding Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the hci_conn reference taken by pair_device() */
	conn = cmd->user_data;

	/* The cancel must target the device currently being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending Pair Device command as cancelled.
	 *
	 * NOTE(review): conn is dereferenced below after the pending cmd
	 * (which held a reference via user_data) is removed; presumed
	 * still valid while hdev is locked — confirm.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3079
/* Common helper for the user-interaction pairing replies (PIN code,
 * user confirmation and passkey, both positive and negative).
 *
 * For LE addresses the reply is consumed entirely by SMP; for BR/EDR a
 * pending mgmt command is queued and the reply is forwarded to the
 * controller via the @hci_op command.
 *
 * Returns 0 or a negative errno; the mgmt reply goes to @sk.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection this pairing reply belongs to */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE replies are handled by SMP; no HCI command is issued */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	/* Undo the pending command if the HCI send failed */
	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3150
3151static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3152			      void *data, u16 len)
3153{
3154	struct mgmt_cp_pin_code_neg_reply *cp = data;
3155
3156	bt_dev_dbg(hdev, "sock %p", sk);
3157
3158	return user_pairing_resp(sk, hdev, &cp->addr,
3159				MGMT_OP_PIN_CODE_NEG_REPLY,
3160				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3161}
3162
3163static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3164			      u16 len)
3165{
3166	struct mgmt_cp_user_confirm_reply *cp = data;
3167
3168	bt_dev_dbg(hdev, "sock %p", sk);
3169
3170	if (len != sizeof(*cp))
3171		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3172				       MGMT_STATUS_INVALID_PARAMS);
3173
3174	return user_pairing_resp(sk, hdev, &cp->addr,
3175				 MGMT_OP_USER_CONFIRM_REPLY,
3176				 HCI_OP_USER_CONFIRM_REPLY, 0);
3177}
3178
3179static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3180				  void *data, u16 len)
3181{
3182	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3183
3184	bt_dev_dbg(hdev, "sock %p", sk);
3185
3186	return user_pairing_resp(sk, hdev, &cp->addr,
3187				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3188				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3189}
3190
3191static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3192			      u16 len)
3193{
3194	struct mgmt_cp_user_passkey_reply *cp = data;
3195
3196	bt_dev_dbg(hdev, "sock %p", sk);
3197
3198	return user_pairing_resp(sk, hdev, &cp->addr,
3199				 MGMT_OP_USER_PASSKEY_REPLY,
3200				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3201}
3202
3203static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3204				  void *data, u16 len)
3205{
3206	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3207
3208	bt_dev_dbg(hdev, "sock %p", sk);
3209
3210	return user_pairing_resp(sk, hdev, &cp->addr,
3211				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3212				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3213}
3214
3215static void adv_expire(struct hci_dev *hdev, u32 flags)
3216{
3217	struct adv_info *adv_instance;
3218	struct hci_request req;
3219	int err;
3220
3221	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3222	if (!adv_instance)
3223		return;
3224
3225	/* stop if current instance doesn't need to be changed */
3226	if (!(adv_instance->flags & flags))
3227		return;
3228
3229	cancel_adv_timeout(hdev);
3230
3231	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3232	if (!adv_instance)
3233		return;
3234
3235	hci_req_init(&req, hdev);
3236	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3237					      true);
3238	if (err)
3239		return;
3240
3241	hci_req_run(&req, NULL);
3242}
3243
/* Completion callback for the HCI request issued by set_local_name().
 *
 * Finalizes the pending MGMT_OP_SET_LOCAL_NAME command and, on success,
 * expires any active advertising instance that includes the local name
 * so it gets refreshed.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3275
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short name.
 *
 * While powered off only the stored copies are updated; when powered an
 * HCI request programs the controller (name, EIR and scan response
 * data) and the command completes from set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the name and notify listeners */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3345
3346static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3347			  u16 len)
3348{
3349	struct mgmt_cp_set_appearance *cp = data;
3350	u16 appearance;
3351	int err;
3352
3353	bt_dev_dbg(hdev, "sock %p", sk);
3354
3355	if (!lmp_le_capable(hdev))
3356		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3357				       MGMT_STATUS_NOT_SUPPORTED);
3358
3359	appearance = le16_to_cpu(cp->appearance);
3360
3361	hci_dev_lock(hdev);
3362
3363	if (hdev->appearance != appearance) {
3364		hdev->appearance = appearance;
3365
3366		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3367			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3368
3369		ext_info_changed(hdev, sk);
3370	}
3371
3372	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3373				0);
3374
3375	hci_dev_unlock(hdev);
3376
3377	return err;
3378}
3379
3380static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3381				 void *data, u16 len)
3382{
3383	struct mgmt_rp_get_phy_confguration rp;
3384
3385	bt_dev_dbg(hdev, "sock %p", sk);
3386
3387	hci_dev_lock(hdev);
3388
3389	memset(&rp, 0, sizeof(rp));
3390
3391	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3392	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3393	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3394
3395	hci_dev_unlock(hdev);
3396
3397	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3398				 &rp, sizeof(rp));
3399}
3400
3401int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3402{
3403	struct mgmt_ev_phy_configuration_changed ev;
3404
3405	memset(&ev, 0, sizeof(ev));
3406
3407	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3408
3409	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3410			  sizeof(ev), skip);
3411}
3412
/* Completion callback for the HCI LE Set Default PHY request issued by
 * set_phy_configuration(); finalizes the pending mgmt command and, on
 * success, broadcasts the configuration change to other sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3443
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * BR/EDR PHY selection is applied synchronously by rewriting the ACL
 * packet-type mask; LE PHY selection is programmed via the HCI LE Set
 * Default PHY command and completes in set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: complete immediately */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY bits onto the ACL packet-type mask. Note
	 * the 2M/3M (EDR) packet-type bits are handled with inverted
	 * polarity here: they are SET when the PHY is NOT selected.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only the BR/EDR part needs
	 * handling and the command can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for TX/RX */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3598
3599static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3600			    u16 len)
3601{
3602	int err = MGMT_STATUS_SUCCESS;
3603	struct mgmt_cp_set_blocked_keys *keys = data;
3604	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3605				   sizeof(struct mgmt_blocked_key_info));
3606	u16 key_count, expected_len;
3607	int i;
3608
3609	bt_dev_dbg(hdev, "sock %p", sk);
3610
3611	key_count = __le16_to_cpu(keys->key_count);
3612	if (key_count > max_key_count) {
3613		bt_dev_err(hdev, "too big key_count value %u", key_count);
3614		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3615				       MGMT_STATUS_INVALID_PARAMS);
3616	}
3617
3618	expected_len = struct_size(keys, keys, key_count);
3619	if (expected_len != len) {
3620		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3621			   expected_len, len);
3622		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3623				       MGMT_STATUS_INVALID_PARAMS);
3624	}
3625
3626	hci_dev_lock(hdev);
3627
3628	hci_blocked_keys_clear(hdev);
3629
3630	for (i = 0; i < keys->key_count; ++i) {
3631		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3632
3633		if (!b) {
3634			err = MGMT_STATUS_NO_RESOURCES;
3635			break;
3636		}
3637
3638		b->type = keys->keys[i].type;
3639		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3640		list_add_rcu(&b->list, &hdev->blocked_keys);
3641	}
3642	hci_dev_unlock(hdev);
3643
3644	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3645				err, NULL, 0);
3646}
3647
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband speech
 * setting on controllers whose driver declares support via
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, only a no-op change (same value) is accepted */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only emit New Settings if the stored flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3703
/* MGMT_OP_READ_SECURITY_INFO handler: report the security capabilities
 * of the controller as a list of EIR-encoded fields appended after the
 * reply header.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	/* 16 bytes covers the reply header plus the at most three EIR
	 * fields appended below (1-byte flags and two 16-bit values).
	 */
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3752
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c
 *
 * Experimental feature UUIDs are stored in reversed (little-endian)
 * byte order relative to their string form above; they are copied
 * verbatim into mgmt replies and events.
 */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 (reversed byte order) */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 (reversed byte order) */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3772
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental
 * features available either globally (@hdev == NULL) or on a specific
 * controller, each as a 16-byte UUID plus a 32-bit flags word.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	/* 2-byte header plus up to three 20-byte (16 UUID + 4 flags)
	 * feature entries; keep in sync when adding features.
	 */
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is only listed on the global (no index) query */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0) set when the controller can be central and
		 * peripheral simultaneously.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3831
3832static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3833					  struct sock *skip)
3834{
3835	struct mgmt_ev_exp_feature_changed ev;
3836
3837	memset(&ev, 0, sizeof(ev));
3838	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3839	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3840
3841	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3842				  &ev, sizeof(ev),
3843				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3844
3845}
3846
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify mgmt sockets (except @skip) that the debug experimental
 * feature was toggled; the event is global, so it is sent with a NULL
 * hdev. BIT(0) in the flags signals the enabled state.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3861
/* MGMT_OP_SET_EXP_FEATURE handler.
 *
 * A zero UUID disables all experimental features; otherwise the UUID
 * selects which feature to toggle (debug or LL privacy). Unknown UUIDs
 * are rejected with MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero UUID: turn every experimental feature off */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature: global toggle for kernel Bluetooth debugging */
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	/* LL privacy feature: per-controller, powered-off only */
	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4012
4013#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4014
4015static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4016			    u16 data_len)
4017{
4018	struct mgmt_cp_get_device_flags *cp = data;
4019	struct mgmt_rp_get_device_flags rp;
4020	struct bdaddr_list_with_flags *br_params;
4021	struct hci_conn_params *params;
4022	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4023	u32 current_flags = 0;
4024	u8 status = MGMT_STATUS_INVALID_PARAMS;
4025
4026	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4027		   &cp->addr.bdaddr, cp->addr.type);
4028
4029	hci_dev_lock(hdev);
4030
4031	if (cp->addr.type == BDADDR_BREDR) {
4032		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4033							      &cp->addr.bdaddr,
4034							      cp->addr.type);
4035		if (!br_params)
4036			goto done;
4037
4038		current_flags = br_params->current_flags;
4039	} else {
4040		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4041						le_addr_type(cp->addr.type));
4042
4043		if (!params)
4044			goto done;
4045
4046		current_flags = params->current_flags;
4047	}
4048
4049	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4050	rp.addr.type = cp->addr.type;
4051	rp.supported_flags = cpu_to_le32(supported_flags);
4052	rp.current_flags = cpu_to_le32(current_flags);
4053
4054	status = MGMT_STATUS_SUCCESS;
4055
4056done:
4057	hci_dev_unlock(hdev);
4058
4059	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4060				&rp, sizeof(rp));
4061}
4062
4063static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4064				 bdaddr_t *bdaddr, u8 bdaddr_type,
4065				 u32 supported_flags, u32 current_flags)
4066{
4067	struct mgmt_ev_device_flags_changed ev;
4068
4069	bacpy(&ev.addr.bdaddr, bdaddr);
4070	ev.addr.type = bdaddr_type;
4071	ev.supported_flags = cpu_to_le32(supported_flags);
4072	ev.current_flags = cpu_to_le32(current_flags);
4073
4074	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4075}
4076
4077static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4078			    u16 len)
4079{
4080	struct mgmt_cp_set_device_flags *cp = data;
4081	struct bdaddr_list_with_flags *br_params;
4082	struct hci_conn_params *params;
4083	u8 status = MGMT_STATUS_INVALID_PARAMS;
4084	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4085	u32 current_flags = __le32_to_cpu(cp->current_flags);
4086
4087	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4088		   &cp->addr.bdaddr, cp->addr.type,
4089		   __le32_to_cpu(current_flags));
4090
4091	if ((supported_flags | current_flags) != supported_flags) {
4092		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4093			    current_flags, supported_flags);
4094		goto done;
4095	}
4096
4097	hci_dev_lock(hdev);
4098
4099	if (cp->addr.type == BDADDR_BREDR) {
4100		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4101							      &cp->addr.bdaddr,
4102							      cp->addr.type);
4103
4104		if (br_params) {
4105			br_params->current_flags = current_flags;
4106			status = MGMT_STATUS_SUCCESS;
4107		} else {
4108			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4109				    &cp->addr.bdaddr, cp->addr.type);
4110		}
4111	} else {
4112		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4113						le_addr_type(cp->addr.type));
4114		if (params) {
4115			params->current_flags = current_flags;
4116			status = MGMT_STATUS_SUCCESS;
4117		} else {
4118			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4119				    &cp->addr.bdaddr,
4120				    le_addr_type(cp->addr.type));
4121		}
4122	}
4123
4124done:
4125	hci_dev_unlock(hdev);
4126
4127	if (status == MGMT_STATUS_SUCCESS)
4128		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4129				     supported_flags, current_flags);
4130
4131	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4132				 &cp->addr, sizeof(cp->addr));
4133}
4134
4135static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4136				   u16 handle)
4137{
4138	struct mgmt_ev_adv_monitor_added ev;
4139
4140	ev.monitor_handle = cpu_to_le16(handle);
4141
4142	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4143}
4144
4145static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4146				     u16 handle)
4147{
4148	struct mgmt_ev_adv_monitor_added ev;
4149
4150	ev.monitor_handle = cpu_to_le16(handle);
4151
4152	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4153}
4154
4155static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4156				 void *data, u16 len)
4157{
4158	struct adv_monitor *monitor = NULL;
4159	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4160	int handle;
4161	size_t rp_size = 0;
4162	__u32 supported = 0;
4163	__u16 num_handles = 0;
4164	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4165
4166	BT_DBG("request for %s", hdev->name);
4167
4168	hci_dev_lock(hdev);
4169
4170	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4171		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4172
4173	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4174		handles[num_handles++] = monitor->handle;
4175	}
4176
4177	hci_dev_unlock(hdev);
4178
4179	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4180	rp = kmalloc(rp_size, GFP_KERNEL);
4181	if (!rp)
4182		return -ENOMEM;
4183
4184	/* Once controller-based monitoring is in place, the enabled_features
4185	 * should reflect the use.
4186	 */
4187	rp->supported_features = cpu_to_le32(supported);
4188	rp->enabled_features = 0;
4189	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4190	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4191	rp->num_handles = cpu_to_le16(num_handles);
4192	if (num_handles)
4193		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4194
4195	return mgmt_cmd_complete(sk, hdev->id,
4196				 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4197				 MGMT_STATUS_SUCCESS, rp, rp_size);
4198}
4199
4200static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4201				    void *data, u16 len)
4202{
4203	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4204	struct mgmt_rp_add_adv_patterns_monitor rp;
4205	struct adv_monitor *m = NULL;
4206	struct adv_pattern *p = NULL;
4207	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4208	__u8 cp_ofst = 0, cp_len = 0;
4209	int err, i;
4210
4211	BT_DBG("request for %s", hdev->name);
4212
4213	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4214		err = mgmt_cmd_status(sk, hdev->id,
4215				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4216				      MGMT_STATUS_INVALID_PARAMS);
4217		goto failed;
4218	}
4219
4220	m = kmalloc(sizeof(*m), GFP_KERNEL);
4221	if (!m) {
4222		err = -ENOMEM;
4223		goto failed;
4224	}
4225
4226	INIT_LIST_HEAD(&m->patterns);
4227	m->active = false;
4228
4229	for (i = 0; i < cp->pattern_count; i++) {
4230		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4231			err = mgmt_cmd_status(sk, hdev->id,
4232					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4233					      MGMT_STATUS_INVALID_PARAMS);
4234			goto failed;
4235		}
4236
4237		cp_ofst = cp->patterns[i].offset;
4238		cp_len = cp->patterns[i].length;
4239		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4240		    cp_len > HCI_MAX_AD_LENGTH ||
4241		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4242			err = mgmt_cmd_status(sk, hdev->id,
4243					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4244					      MGMT_STATUS_INVALID_PARAMS);
4245			goto failed;
4246		}
4247
4248		p = kmalloc(sizeof(*p), GFP_KERNEL);
4249		if (!p) {
4250			err = -ENOMEM;
4251			goto failed;
4252		}
4253
4254		p->ad_type = cp->patterns[i].ad_type;
4255		p->offset = cp->patterns[i].offset;
4256		p->length = cp->patterns[i].length;
4257		memcpy(p->value, cp->patterns[i].value, p->length);
4258
4259		INIT_LIST_HEAD(&p->list);
4260		list_add(&p->list, &m->patterns);
4261	}
4262
4263	if (mp_cnt != cp->pattern_count) {
4264		err = mgmt_cmd_status(sk, hdev->id,
4265				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4266				      MGMT_STATUS_INVALID_PARAMS);
4267		goto failed;
4268	}
4269
4270	hci_dev_lock(hdev);
4271
4272	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4273
4274	err = hci_add_adv_monitor(hdev, m);
4275	if (err) {
4276		if (err == -ENOSPC) {
4277			mgmt_cmd_status(sk, hdev->id,
4278					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4279					MGMT_STATUS_NO_RESOURCES);
4280		}
4281		goto unlock;
4282	}
4283
4284	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4285		mgmt_adv_monitor_added(sk, hdev, m->handle);
4286
4287	hci_dev_unlock(hdev);
4288
4289	rp.monitor_handle = cpu_to_le16(m->handle);
4290
4291	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4292				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4293
4294unlock:
4295	hci_dev_unlock(hdev);
4296
4297failed:
4298	hci_free_adv_monitor(m);
4299	return err;
4300}
4301
4302static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4303			      void *data, u16 len)
4304{
4305	struct mgmt_cp_remove_adv_monitor *cp = data;
4306	struct mgmt_rp_remove_adv_monitor rp;
4307	unsigned int prev_adv_monitors_cnt;
4308	u16 handle;
4309	int err;
4310
4311	BT_DBG("request for %s", hdev->name);
4312
4313	hci_dev_lock(hdev);
4314
4315	handle = __le16_to_cpu(cp->monitor_handle);
4316	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4317
4318	err = hci_remove_adv_monitor(hdev, handle);
4319	if (err == -ENOENT) {
4320		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4321				      MGMT_STATUS_INVALID_INDEX);
4322		goto unlock;
4323	}
4324
4325	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4326		mgmt_adv_monitor_removed(sk, hdev, handle);
4327
4328	hci_dev_unlock(hdev);
4329
4330	rp.monitor_handle = cp->monitor_handle;
4331
4332	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4333				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4334
4335unlock:
4336	hci_dev_unlock(hdev);
4337	return err;
4338}
4339
/* Completion handler for the HCI Read Local OOB (Ext) Data request
 * issued by read_local_oob_data(). Translates the controller reply into
 * a MGMT_OP_READ_LOCAL_OOB_DATA response for the pending mgmt command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Nothing to do if the mgmt command is no longer pending. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only P-192 hash/randomizer are present. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the mgmt reply so the P-256 fields are omitted. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply carries both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4398
4399static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4400			       void *data, u16 data_len)
4401{
4402	struct mgmt_pending_cmd *cmd;
4403	struct hci_request req;
4404	int err;
4405
4406	bt_dev_dbg(hdev, "sock %p", sk);
4407
4408	hci_dev_lock(hdev);
4409
4410	if (!hdev_is_powered(hdev)) {
4411		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4412				      MGMT_STATUS_NOT_POWERED);
4413		goto unlock;
4414	}
4415
4416	if (!lmp_ssp_capable(hdev)) {
4417		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4418				      MGMT_STATUS_NOT_SUPPORTED);
4419		goto unlock;
4420	}
4421
4422	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4423		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4424				      MGMT_STATUS_BUSY);
4425		goto unlock;
4426	}
4427
4428	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4429	if (!cmd) {
4430		err = -ENOMEM;
4431		goto unlock;
4432	}
4433
4434	hci_req_init(&req, hdev);
4435
4436	if (bredr_sc_enabled(hdev))
4437		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4438	else
4439		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4440
4441	err = hci_req_run_skb(&req, read_local_oob_data_complete);
4442	if (err < 0)
4443		mgmt_pending_remove(cmd);
4444
4445unlock:
4446	hci_dev_unlock(hdev);
4447	return err;
4448}
4449
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA. The command comes in two layouts
 * distinguished by length: the legacy one (P-192 hash/randomizer only)
 * and the extended one (P-192 + P-256). Stores the data for later
 * pairing use via hci_add_remote_oob_data().
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy layout only carries P-192 values, so it is
		 * restricted to BR/EDR addresses.
		 */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known layout: length is bogus. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4557
4558static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4559				  void *data, u16 len)
4560{
4561	struct mgmt_cp_remove_remote_oob_data *cp = data;
4562	u8 status;
4563	int err;
4564
4565	bt_dev_dbg(hdev, "sock %p", sk);
4566
4567	if (cp->addr.type != BDADDR_BREDR)
4568		return mgmt_cmd_complete(sk, hdev->id,
4569					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4570					 MGMT_STATUS_INVALID_PARAMS,
4571					 &cp->addr, sizeof(cp->addr));
4572
4573	hci_dev_lock(hdev);
4574
4575	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4576		hci_remote_oob_data_clear(hdev);
4577		status = MGMT_STATUS_SUCCESS;
4578		goto done;
4579	}
4580
4581	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4582	if (err < 0)
4583		status = MGMT_STATUS_INVALID_PARAMS;
4584	else
4585		status = MGMT_STATUS_SUCCESS;
4586
4587done:
4588	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4589				status, &cp->addr, sizeof(cp->addr));
4590
4591	hci_dev_unlock(hdev);
4592	return err;
4593}
4594
4595void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4596{
4597	struct mgmt_pending_cmd *cmd;
4598
4599	bt_dev_dbg(hdev, "status %d", status);
4600
4601	hci_dev_lock(hdev);
4602
4603	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4604	if (!cmd)
4605		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4606
4607	if (!cmd)
4608		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4609
4610	if (cmd) {
4611		cmd->cmd_complete(cmd, mgmt_status(status));
4612		mgmt_pending_remove(cmd);
4613	}
4614
4615	hci_dev_unlock(hdev);
4616
4617	/* Handle suspend notifier */
4618	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4619			       hdev->suspend_tasks)) {
4620		bt_dev_dbg(hdev, "Unpaused discovery");
4621		wake_up(&hdev->suspend_wait_q);
4622	}
4623}
4624
4625static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4626				    uint8_t *mgmt_status)
4627{
4628	switch (type) {
4629	case DISCOV_TYPE_LE:
4630		*mgmt_status = mgmt_le_support(hdev);
4631		if (*mgmt_status)
4632			return false;
4633		break;
4634	case DISCOV_TYPE_INTERLEAVED:
4635		*mgmt_status = mgmt_le_support(hdev);
4636		if (*mgmt_status)
4637			return false;
4638		fallthrough;
4639	case DISCOV_TYPE_BREDR:
4640		*mgmt_status = mgmt_bredr_support(hdev);
4641		if (*mgmt_status)
4642			return false;
4643		break;
4644	default:
4645		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4646		return false;
4647	}
4648
4649	return true;
4650}
4651
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (selected by @op). Validates the
 * request and schedules the actual discovery on the request workqueue;
 * the pending mgmt command is completed asynchronously.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery can run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The HCI work runs asynchronously; the pending command is
	 * completed from mgmt_start_discovery_complete().
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4719
/* Handle MGMT_OP_START_DISCOVERY (regular, non-limited discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4726
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (limited discoverable mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4734
/* Completion callback for Start Service Discovery: echo back only the
 * first parameter byte (the discovery type) from the stored command.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4741
/* Handle MGMT_OP_START_SERVICE_DISCOVERY. Like start_discovery, but with
 * an RSSI threshold and a variable-length UUID filter list appended to
 * the command; results are filtered before being reported.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery can run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the advertised UUID count exactly. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* The HCI work runs asynchronously; the pending command is
	 * completed from mgmt_start_discovery_complete().
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4842
/* Called when the stop-discovery HCI work finishes. Completes the
 * pending Stop Discovery mgmt command (if any) and wakes the suspend
 * machinery if it was waiting for discovery to pause.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4865
4866static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4867			  u16 len)
4868{
4869	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4870	struct mgmt_pending_cmd *cmd;
4871	int err;
4872
4873	bt_dev_dbg(hdev, "sock %p", sk);
4874
4875	hci_dev_lock(hdev);
4876
4877	if (!hci_discovery_active(hdev)) {
4878		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4879					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4880					sizeof(mgmt_cp->type));
4881		goto unlock;
4882	}
4883
4884	if (hdev->discovery.type != mgmt_cp->type) {
4885		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4886					MGMT_STATUS_INVALID_PARAMS,
4887					&mgmt_cp->type, sizeof(mgmt_cp->type));
4888		goto unlock;
4889	}
4890
4891	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4892	if (!cmd) {
4893		err = -ENOMEM;
4894		goto unlock;
4895	}
4896
4897	cmd->cmd_complete = generic_cmd_complete;
4898
4899	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4900	queue_work(hdev->req_workqueue, &hdev->discov_update);
4901	err = 0;
4902
4903unlock:
4904	hci_dev_unlock(hdev);
4905	return err;
4906}
4907
4908static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4909			u16 len)
4910{
4911	struct mgmt_cp_confirm_name *cp = data;
4912	struct inquiry_entry *e;
4913	int err;
4914
4915	bt_dev_dbg(hdev, "sock %p", sk);
4916
4917	hci_dev_lock(hdev);
4918
4919	if (!hci_discovery_active(hdev)) {
4920		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4921					MGMT_STATUS_FAILED, &cp->addr,
4922					sizeof(cp->addr));
4923		goto failed;
4924	}
4925
4926	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4927	if (!e) {
4928		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4929					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4930					sizeof(cp->addr));
4931		goto failed;
4932	}
4933
4934	if (cp->name_known) {
4935		e->name_state = NAME_KNOWN;
4936		list_del(&e->list);
4937	} else {
4938		e->name_state = NAME_NEEDED;
4939		hci_inquiry_cache_update_resolve(hdev, e);
4940	}
4941
4942	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4943				&cp->addr, sizeof(cp->addr));
4944
4945failed:
4946	hci_dev_unlock(hdev);
4947	return err;
4948}
4949
4950static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4951			u16 len)
4952{
4953	struct mgmt_cp_block_device *cp = data;
4954	u8 status;
4955	int err;
4956
4957	bt_dev_dbg(hdev, "sock %p", sk);
4958
4959	if (!bdaddr_type_is_valid(cp->addr.type))
4960		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4961					 MGMT_STATUS_INVALID_PARAMS,
4962					 &cp->addr, sizeof(cp->addr));
4963
4964	hci_dev_lock(hdev);
4965
4966	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4967				  cp->addr.type);
4968	if (err < 0) {
4969		status = MGMT_STATUS_FAILED;
4970		goto done;
4971	}
4972
4973	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4974		   sk);
4975	status = MGMT_STATUS_SUCCESS;
4976
4977done:
4978	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4979				&cp->addr, sizeof(cp->addr));
4980
4981	hci_dev_unlock(hdev);
4982
4983	return err;
4984}
4985
4986static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4987			  u16 len)
4988{
4989	struct mgmt_cp_unblock_device *cp = data;
4990	u8 status;
4991	int err;
4992
4993	bt_dev_dbg(hdev, "sock %p", sk);
4994
4995	if (!bdaddr_type_is_valid(cp->addr.type))
4996		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4997					 MGMT_STATUS_INVALID_PARAMS,
4998					 &cp->addr, sizeof(cp->addr));
4999
5000	hci_dev_lock(hdev);
5001
5002	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5003				  cp->addr.type);
5004	if (err < 0) {
5005		status = MGMT_STATUS_INVALID_PARAMS;
5006		goto done;
5007	}
5008
5009	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5010		   sk);
5011	status = MGMT_STATUS_SUCCESS;
5012
5013done:
5014	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5015				&cp->addr, sizeof(cp->addr));
5016
5017	hci_dev_unlock(hdev);
5018
5019	return err;
5020}
5021
5022static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5023			 u16 len)
5024{
5025	struct mgmt_cp_set_device_id *cp = data;
5026	struct hci_request req;
5027	int err;
5028	__u16 source;
5029
5030	bt_dev_dbg(hdev, "sock %p", sk);
5031
5032	source = __le16_to_cpu(cp->source);
5033
5034	if (source > 0x0002)
5035		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5036				       MGMT_STATUS_INVALID_PARAMS);
5037
5038	hci_dev_lock(hdev);
5039
5040	hdev->devid_source = source;
5041	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5042	hdev->devid_product = __le16_to_cpu(cp->product);
5043	hdev->devid_version = __le16_to_cpu(cp->version);
5044
5045	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5046				NULL, 0);
5047
5048	hci_req_init(&req, hdev);
5049	__hci_req_update_eir(&req);
5050	hci_req_run(&req, NULL);
5051
5052	hci_dev_unlock(hdev);
5053
5054	return err;
5055}
5056
/* Request-completion callback used when re-enabling instance advertising
 * from set_advertising_complete(); only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5062
/* Completion handler for the Set Advertising HCI request. Syncs the
 * HCI_ADVERTISING flag with the controller state, answers all pending
 * Set Advertising commands, notifies suspend waiters, and re-schedules
 * multi-instance advertising if the global setting was just disabled.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* No current instance selected: fall back to the first one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5136
5137static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5138			   u16 len)
5139{
5140	struct mgmt_mode *cp = data;
5141	struct mgmt_pending_cmd *cmd;
5142	struct hci_request req;
5143	u8 val, status;
5144	int err;
5145
5146	bt_dev_dbg(hdev, "sock %p", sk);
5147
5148	status = mgmt_le_support(hdev);
5149	if (status)
5150		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5151				       status);
5152
	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
5156	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5157		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5158				       MGMT_STATUS_NOT_SUPPORTED);
5159
5160	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5161		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5162				       MGMT_STATUS_INVALID_PARAMS);
5163
5164	if (hdev->advertising_paused)
5165		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5166				       MGMT_STATUS_BUSY);
5167
5168	hci_dev_lock(hdev);
5169
5170	val = !!cp->val;
5171
5172	/* The following conditions are ones which mean that we should
5173	 * not do any HCI communication but directly send a mgmt
5174	 * response to user space (after toggling the flag if
5175	 * necessary).
5176	 */
5177	if (!hdev_is_powered(hdev) ||
5178	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5179	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5180	    hci_conn_num(hdev, LE_LINK) > 0 ||
5181	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5182	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5183		bool changed;
5184
5185		if (cp->val) {
5186			hdev->cur_adv_instance = 0x00;
5187			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5188			if (cp->val == 0x02)
5189				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5190			else
5191				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5192		} else {
5193			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5194			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5195		}
5196
5197		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5198		if (err < 0)
5199			goto unlock;
5200
5201		if (changed)
5202			err = new_settings(hdev, sk);
5203
5204		goto unlock;
5205	}
5206
5207	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5208	    pending_find(MGMT_OP_SET_LE, hdev)) {
5209		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5210				      MGMT_STATUS_BUSY);
5211		goto unlock;
5212	}
5213
5214	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5215	if (!cmd) {
5216		err = -ENOMEM;
5217		goto unlock;
5218	}
5219
5220	hci_req_init(&req, hdev);
5221
5222	if (cp->val == 0x02)
5223		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5224	else
5225		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5226
5227	cancel_adv_timeout(hdev);
5228
5229	if (val) {
5230		/* Switch to instance "0" for the Set Advertising setting.
5231		 * We cannot use update_[adv|scan_rsp]_data() here as the
5232		 * HCI_ADVERTISING flag is not yet set.
5233		 */
5234		hdev->cur_adv_instance = 0x00;
5235
5236		if (ext_adv_capable(hdev)) {
5237			__hci_req_start_ext_adv(&req, 0x00);
5238		} else {
5239			__hci_req_update_adv_data(&req, 0x00);
5240			__hci_req_update_scan_rsp_data(&req, 0x00);
5241			__hci_req_enable_advertising(&req);
5242		}
5243	} else {
5244		__hci_req_disable_advertising(&req);
5245	}
5246
5247	err = hci_req_run(&req, set_advertising_complete);
5248	if (err < 0)
5249		mgmt_pending_remove(cmd);
5250
5251unlock:
5252	hci_dev_unlock(hdev);
5253	return err;
5254}
5255
5256static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5257			      void *data, u16 len)
5258{
5259	struct mgmt_cp_set_static_address *cp = data;
5260	int err;
5261
5262	bt_dev_dbg(hdev, "sock %p", sk);
5263
5264	if (!lmp_le_capable(hdev))
5265		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5266				       MGMT_STATUS_NOT_SUPPORTED);
5267
5268	if (hdev_is_powered(hdev))
5269		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5270				       MGMT_STATUS_REJECTED);
5271
5272	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5273		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5274			return mgmt_cmd_status(sk, hdev->id,
5275					       MGMT_OP_SET_STATIC_ADDRESS,
5276					       MGMT_STATUS_INVALID_PARAMS);
5277
5278		/* Two most significant bits shall be set */
5279		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5280			return mgmt_cmd_status(sk, hdev->id,
5281					       MGMT_OP_SET_STATIC_ADDRESS,
5282					       MGMT_STATUS_INVALID_PARAMS);
5283	}
5284
5285	hci_dev_lock(hdev);
5286
5287	bacpy(&hdev->static_addr, &cp->bdaddr);
5288
5289	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5290	if (err < 0)
5291		goto unlock;
5292
5293	err = new_settings(hdev, sk);
5294
5295unlock:
5296	hci_dev_unlock(hdev);
5297	return err;
5298}
5299
5300static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5301			   void *data, u16 len)
5302{
5303	struct mgmt_cp_set_scan_params *cp = data;
5304	__u16 interval, window;
5305	int err;
5306
5307	bt_dev_dbg(hdev, "sock %p", sk);
5308
5309	if (!lmp_le_capable(hdev))
5310		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5311				       MGMT_STATUS_NOT_SUPPORTED);
5312
5313	interval = __le16_to_cpu(cp->interval);
5314
5315	if (interval < 0x0004 || interval > 0x4000)
5316		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5317				       MGMT_STATUS_INVALID_PARAMS);
5318
5319	window = __le16_to_cpu(cp->window);
5320
5321	if (window < 0x0004 || window > 0x4000)
5322		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5323				       MGMT_STATUS_INVALID_PARAMS);
5324
5325	if (window > interval)
5326		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5327				       MGMT_STATUS_INVALID_PARAMS);
5328
5329	hci_dev_lock(hdev);
5330
5331	hdev->le_scan_interval = interval;
5332	hdev->le_scan_window = window;
5333
5334	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5335				NULL, 0);
5336
5337	/* If background scan is running, restart it so new parameters are
5338	 * loaded.
5339	 */
5340	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5341	    hdev->discovery.state == DISCOVERY_STOPPED) {
5342		struct hci_request req;
5343
5344		hci_req_init(&req, hdev);
5345
5346		hci_req_add_le_scan_disable(&req, false);
5347		hci_req_add_le_passive_scan(&req);
5348
5349		hci_req_run(&req, NULL);
5350	}
5351
5352	hci_dev_unlock(hdev);
5353
5354	return err;
5355}
5356
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * On success the HCI_FAST_CONNECTABLE flag is synced with the value
 * stored in the pending command and the new settings are broadcast; on
 * failure only a command status is returned to the requester.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Without a matching pending command there is nothing to do */
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5390
/* Handler for the MGMT_OP_SET_FAST_CONNECTABLE command.
 *
 * Requires BR/EDR to be enabled and at least a 1.2 controller. If the
 * setting already matches or the controller is powered off, only the
 * flag and settings response are handled; otherwise the page scan
 * parameters are rewritten via an HCI request whose completion is
 * handled in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable operation may be pending at a time */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested state already active: just acknowledge */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5455
/* HCI request completion callback for MGMT_OP_SET_BREDR.
 *
 * set_bredr() optimistically sets HCI_BREDR_ENABLED before running the
 * request, so on failure the flag has to be cleared again here before
 * reporting the error status back to the requester.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5487
/* Handler for the MGMT_OP_SET_BREDR command.
 *
 * Enables or disables BR/EDR on a dual-mode (BR/EDR + LE) controller.
 * Disabling while powered on is rejected, as is re-enabling when the
 * controller operates LE-only with a static address or with secure
 * connections enabled. When HCI traffic is required, completion is
 * handled by set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state already active: just acknowledge */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5599
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * Syncs the HCI_SC_ENABLED/HCI_SC_ONLY flags with the value stored in
 * the pending command (0x00 = off, 0x01 = enabled, 0x02 = SC only)
 * and broadcasts the new settings; on failure only the error status is
 * reported.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5644
/* Handler for the MGMT_OP_SET_SECURE_CONN command.
 *
 * cp->val may be 0x00 (disable), 0x01 (enable) or 0x02 (SC only mode).
 * When the controller is powered off, not SC capable or has BR/EDR
 * disabled, only the host flags are toggled; otherwise the Write
 * Secure Connections Host Support command is issued and completion is
 * handled by sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SC requires SSP to be
	 * enabled first.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested state already active: just acknowledge */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5732
5733static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5734			  void *data, u16 len)
5735{
5736	struct mgmt_mode *cp = data;
5737	bool changed, use_changed;
5738	int err;
5739
5740	bt_dev_dbg(hdev, "sock %p", sk);
5741
5742	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5743		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5744				       MGMT_STATUS_INVALID_PARAMS);
5745
5746	hci_dev_lock(hdev);
5747
5748	if (cp->val)
5749		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5750	else
5751		changed = hci_dev_test_and_clear_flag(hdev,
5752						      HCI_KEEP_DEBUG_KEYS);
5753
5754	if (cp->val == 0x02)
5755		use_changed = !hci_dev_test_and_set_flag(hdev,
5756							 HCI_USE_DEBUG_KEYS);
5757	else
5758		use_changed = hci_dev_test_and_clear_flag(hdev,
5759							  HCI_USE_DEBUG_KEYS);
5760
5761	if (hdev_is_powered(hdev) && use_changed &&
5762	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5763		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5764		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5765			     sizeof(mode), &mode);
5766	}
5767
5768	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5769	if (err < 0)
5770		goto unlock;
5771
5772	if (changed)
5773		err = new_settings(hdev, sk);
5774
5775unlock:
5776	hci_dev_unlock(hdev);
5777	return err;
5778}
5779
/* Handler for the MGMT_OP_SET_PRIVACY command.
 *
 * cp->privacy may be 0x00 (disable), 0x01 (enable) or 0x02 (limited
 * privacy). Enabling stores the supplied IRK and marks the RPA as
 * expired so a fresh one gets generated; disabling wipes the IRK.
 * Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5836
5837static bool irk_is_valid(struct mgmt_irk_info *irk)
5838{
5839	switch (irk->addr.type) {
5840	case BDADDR_LE_PUBLIC:
5841		return true;
5842
5843	case BDADDR_LE_RANDOM:
5844		/* Two most significant bits shall be set */
5845		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5846			return false;
5847		return true;
5848	}
5849
5850	return false;
5851}
5852
/* Handler for the MGMT_OP_LOAD_IRKS command.
 *
 * Validates the supplied list of Identity Resolving Keys, replaces the
 * current IRK store with it (blocked keys are skipped) and enables RPA
 * resolving. The command payload length must exactly match the
 * declared irk_count.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the existing IRK store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5923
5924static bool ltk_is_valid(struct mgmt_ltk_info *key)
5925{
5926	if (key->master != 0x00 && key->master != 0x01)
5927		return false;
5928
5929	switch (key->addr.type) {
5930	case BDADDR_LE_PUBLIC:
5931		return true;
5932
5933	case BDADDR_LE_RANDOM:
5934		/* Two most significant bits shall be set */
5935		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5936			return false;
5937		return true;
5938	}
5939
5940	return false;
5941}
5942
/* Handler for the MGMT_OP_LOAD_LONG_TERM_KEYS command.
 *
 * Validates the supplied list of Long Term Keys and replaces the
 * current LTK store with it. Blocked keys, debug keys and keys of
 * unknown type are silently skipped. The payload length must exactly
 * match the declared key_count.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before touching the existing LTK store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall into the default
			 * case and are skipped rather than stored.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6038
6039static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6040{
6041	struct hci_conn *conn = cmd->user_data;
6042	struct mgmt_rp_get_conn_info rp;
6043	int err;
6044
6045	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6046
6047	if (status == MGMT_STATUS_SUCCESS) {
6048		rp.rssi = conn->rssi;
6049		rp.tx_power = conn->tx_power;
6050		rp.max_tx_power = conn->max_tx_power;
6051	} else {
6052		rp.rssi = HCI_RSSI_INVALID;
6053		rp.tx_power = HCI_TX_POWER_INVALID;
6054		rp.max_tx_power = HCI_TX_POWER_INVALID;
6055	}
6056
6057	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6058				status, &rp, sizeof(rp));
6059
6060	hci_conn_drop(conn);
6061	hci_conn_put(conn);
6062
6063	return err;
6064}
6065
/* HCI request completion callback for the RSSI/TX power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last
 * sent command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6119
/* Handler for the MGMT_OP_GET_CONN_INFO command.
 *
 * Returns RSSI and TX power information for an existing connection.
 * If the cached values are recent enough they are returned directly;
 * otherwise an HCI request is issued to refresh them and the reply is
 * deferred to conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References are released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6240
/* Finish a pending Get Clock Info command.
 *
 * Builds the mgmt reply from the clock values cached on the hci_dev and
 * hci_conn, then releases the connection hold/reference taken when the
 * command was queued in get_clock_info().
 *
 * @cmd:    pending mgmt command; user_data holds the hci_conn (may be NULL
 *          when only the local clock was requested)
 * @status: mgmt status code; on error the reply carries only the address
 *          echoed from the command parameters
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Echo back the address the command was issued for */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	/* cmd->index only identifies the device; take a temporary ref to
	 * read the cached local clock.
	 */
	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6276
/* HCI request completion callback for Get Clock Info.
 *
 * Recovers which connection (if any) the HCI_OP_READ_CLOCK request was
 * issued for, locates the matching pending mgmt command and completes it
 * with the translated HCI status.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Retrieve the parameters of the Read Clock command that was sent */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		/* Piconet clock: map the handle back to its connection */
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Pending commands are matched on the same conn pointer that was
	 * stored as user_data when the command was queued.
	 */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6308
/* Get Clock Info mgmt command handler.
 *
 * Reads the local clock and, when a peer address is given, the piconet
 * clock of the matching BR/EDR connection. The actual values arrive
 * asynchronously via get_clock_info_complete(); a pending command is
 * queued to carry the reply back to the socket.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects an established ACL connection whose
	 * piconet clock should be read as well.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read the local clock (which = 0x00, handle ignored) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the command completes;
		 * released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6384
6385static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6386{
6387	struct hci_conn *conn;
6388
6389	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6390	if (!conn)
6391		return false;
6392
6393	if (conn->dst_type != type)
6394		return false;
6395
6396	if (conn->state != BT_CONNECTED)
6397		return false;
6398
6399	return true;
6400}
6401
/* This function requires the caller holds hdev->lock */
/* Set (or update) the auto-connect policy for an LE device.
 *
 * Creates the connection parameters entry if needed and moves it onto
 * the pend_le_conns or pend_le_reports action list according to the
 * requested policy. Returns 0 on success, -EIO if the entry could not
 * be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on;
	 * it is re-added below based on the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a background connect if one isn't up already */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6446
6447static void device_added(struct sock *sk, struct hci_dev *hdev,
6448			 bdaddr_t *bdaddr, u8 type, u8 action)
6449{
6450	struct mgmt_ev_device_added ev;
6451
6452	bacpy(&ev.addr.bdaddr, bdaddr);
6453	ev.addr.type = type;
6454	ev.action = action;
6455
6456	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6457}
6458
/* Add Device mgmt command handler.
 *
 * For BR/EDR addresses the device is put on the connection whitelist
 * (action 0x01, incoming connections, is the only supported action).
 * For LE addresses the action is mapped to an auto-connect policy and
 * stored in the connection parameters. Emits Device Added and Device
 * Flags Changed events on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan, 0x01 = allow incoming, 0x02 = auto-connect */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need enabling for the new whitelist entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags for the Device Flags Changed event */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6556
6557static void device_removed(struct sock *sk, struct hci_dev *hdev,
6558			   bdaddr_t *bdaddr, u8 type)
6559{
6560	struct mgmt_ev_device_removed ev;
6561
6562	bacpy(&ev.addr.bdaddr, bdaddr);
6563	ev.addr.type = type;
6564
6565	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6566}
6567
/* Remove Device mgmt command handler.
 *
 * With a specific address: removes a BR/EDR whitelist entry or the LE
 * connection parameters for that device. With BDADDR_ANY (and type 0):
 * clears the whole whitelist and all non-disabled LE connection
 * parameters. Emits Device Removed events for every device dropped.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Entry not on the whitelist */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may no longer be needed */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not added via Add Device,
		 * so they cannot be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connect attempt in flight,
			 * but demote them to explicit-connect only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6696
/* Load Connection Parameters mgmt command handler.
 *
 * Replaces the stored LE connection parameters with the list supplied
 * by userspace. Individual entries with an invalid address type or
 * out-of-range parameters are skipped (logged, not fatal); only a
 * malformed overall command is rejected.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count that still fits a u16 total payload length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload size exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop stale entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6781
/* Set External Configuration mgmt command handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. If the change moves the controller
 * across the configured/unconfigured boundary, the mgmt index is
 * re-announced accordingly (and a newly configured controller gets
 * powered on via the auto-off path).
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Configuration can only change while the controller is down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the flag flip moved the device across the configured boundary,
	 * tear down the old mgmt index and bring up the new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6837
/* Set Public Address mgmt command handler.
 *
 * Stores a public address for controllers that support programming one
 * via the set_bdaddr driver callback. If setting the address completes
 * the configuration of an unconfigured controller, the index is moved
 * from the unconfigured to the configured list and power-on is queued.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address is programmed during power-up, so the controller
	 * must currently be down.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6889
/* Completion callback for the Read Local OOB (Extended) Data HCI request
 * issued by read_local_ssp_oob_req().
 *
 * Assembles the hash/randomizer values from the HCI reply into EIR
 * format, completes the pending Read Local OOB Extended Data command
 * and, on success, broadcasts a Local OOB Data Updated event to other
 * interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev TLV + hash TLV + randomizer TLV */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode must not expose P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure, reply without any EIR payload */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify other sockets that opted in for OOB data events */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7000
7001static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7002				  struct mgmt_cp_read_local_oob_ext_data *cp)
7003{
7004	struct mgmt_pending_cmd *cmd;
7005	struct hci_request req;
7006	int err;
7007
7008	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7009			       cp, sizeof(*cp));
7010	if (!cmd)
7011		return -ENOMEM;
7012
7013	hci_req_init(&req, hdev);
7014
7015	if (bredr_sc_enabled(hdev))
7016		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7017	else
7018		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7019
7020	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7021	if (err < 0) {
7022		mgmt_pending_remove(cmd);
7023		return err;
7024	}
7025
7026	return 0;
7027}
7028
/* Read Local OOB Extended Data mgmt command handler.
 *
 * For BR/EDR with SSP enabled the data comes asynchronously from the
 * controller (read_local_ssp_oob_req). For LE the EIR payload (address,
 * role, optional SC confirm/random values and flags) is assembled
 * directly from local state. On success a Local OOB Data Updated event
 * is also broadcast to sockets that opted in.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested transport and size the reply */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Data comes from the controller; reply is sent
			 * from read_local_oob_ext_data_complete().
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will actually see; the 7th
		 * byte encodes the address type (0x01 = static/random).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral (advertising), 0x01 = central */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7184
7185static u32 get_supported_adv_flags(struct hci_dev *hdev)
7186{
7187	u32 flags = 0;
7188
7189	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7190	flags |= MGMT_ADV_FLAG_DISCOV;
7191	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7192	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7193	flags |= MGMT_ADV_FLAG_APPEARANCE;
7194	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7195
7196	/* In extended adv TX_POWER returned from Set Adv Param
7197	 * will be always valid.
7198	 */
7199	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7200	    ext_adv_capable(hdev))
7201		flags |= MGMT_ADV_FLAG_TX_POWER;
7202
7203	if (ext_adv_capable(hdev)) {
7204		flags |= MGMT_ADV_FLAG_SEC_1M;
7205
7206		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7207			flags |= MGMT_ADV_FLAG_SEC_2M;
7208
7209		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7210			flags |= MGMT_ADV_FLAG_SEC_CODED;
7211	}
7212
7213	return flags;
7214}
7215
7216static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7217			     void *data, u16 data_len)
7218{
7219	struct mgmt_rp_read_adv_features *rp;
7220	size_t rp_len;
7221	int err;
7222	struct adv_info *adv_instance;
7223	u32 supported_flags;
7224	u8 *instance;
7225
7226	bt_dev_dbg(hdev, "sock %p", sk);
7227
7228	if (!lmp_le_capable(hdev))
7229		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7230				       MGMT_STATUS_REJECTED);
7231
7232	/* Enabling the experimental LL Privay support disables support for
7233	 * advertising.
7234	 */
7235	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7236		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7237				       MGMT_STATUS_NOT_SUPPORTED);
7238
7239	hci_dev_lock(hdev);
7240
7241	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7242	rp = kmalloc(rp_len, GFP_ATOMIC);
7243	if (!rp) {
7244		hci_dev_unlock(hdev);
7245		return -ENOMEM;
7246	}
7247
7248	supported_flags = get_supported_adv_flags(hdev);
7249
7250	rp->supported_flags = cpu_to_le32(supported_flags);
7251	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7252	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7253	rp->max_instances = HCI_MAX_ADV_INSTANCES;
7254	rp->num_instances = hdev->adv_instance_cnt;
7255
7256	instance = rp->instance;
7257	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7258		*instance = adv_instance->instance;
7259		instance++;
7260	}
7261
7262	hci_dev_unlock(hdev);
7263
7264	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7265				MGMT_STATUS_SUCCESS, rp, rp_len);
7266
7267	kfree(rp);
7268
7269	return err;
7270}
7271
7272static u8 calculate_name_len(struct hci_dev *hdev)
7273{
7274	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7275
7276	return append_local_name(hdev, buf, 0);
7277}
7278
7279static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7280			   bool is_adv_data)
7281{
7282	u8 max_len = HCI_MAX_AD_LENGTH;
7283
7284	if (is_adv_data) {
7285		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7286				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7287				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7288			max_len -= 3;
7289
7290		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7291			max_len -= 3;
7292	} else {
7293		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7294			max_len -= calculate_name_len(hdev);
7295
7296		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7297			max_len -= 4;
7298	}
7299
7300	return max_len;
7301}
7302
7303static bool flags_managed(u32 adv_flags)
7304{
7305	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7306			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7307			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7308}
7309
7310static bool tx_power_managed(u32 adv_flags)
7311{
7312	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7313}
7314
7315static bool name_managed(u32 adv_flags)
7316{
7317	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7318}
7319
7320static bool appearance_managed(u32 adv_flags)
7321{
7322	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7323}
7324
7325static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7326			      u8 len, bool is_adv_data)
7327{
7328	int i, cur_len;
7329	u8 max_len;
7330
7331	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7332
7333	if (len > max_len)
7334		return false;
7335
7336	/* Make sure that the data is correctly formatted. */
7337	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7338		cur_len = data[i];
7339
7340		if (data[i + 1] == EIR_FLAGS &&
7341		    (!is_adv_data || flags_managed(adv_flags)))
7342			return false;
7343
7344		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7345			return false;
7346
7347		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7348			return false;
7349
7350		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7351			return false;
7352
7353		if (data[i + 1] == EIR_APPEARANCE &&
7354		    appearance_managed(adv_flags))
7355			return false;
7356
7357		/* If the current field length would exceed the total data
7358		 * length, then it's invalid.
7359		 */
7360		if (i + cur_len >= len)
7361			return false;
7362	}
7363
7364	return true;
7365}
7366
/* HCI completion callback for Add Advertising.
 *
 * On failure, removes every advertising instance that was still pending
 * (cancelling the advertising timeout if the current instance is among
 * them) and emits Advertising Removed events; on success, marks pending
 * instances as committed. Finally completes the pending mgmt command,
 * if one is still queued.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer before dropping the instance it
		 * refers to.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7418
7419static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7420			   void *data, u16 data_len)
7421{
7422	struct mgmt_cp_add_advertising *cp = data;
7423	struct mgmt_rp_add_advertising rp;
7424	u32 flags;
7425	u32 supported_flags, phy_flags;
7426	u8 status;
7427	u16 timeout, duration;
7428	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7429	u8 schedule_instance = 0;
7430	struct adv_info *next_instance;
7431	int err;
7432	struct mgmt_pending_cmd *cmd;
7433	struct hci_request req;
7434
7435	bt_dev_dbg(hdev, "sock %p", sk);
7436
7437	status = mgmt_le_support(hdev);
7438	if (status)
7439		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7440				       status);
7441
7442	/* Enabling the experimental LL Privay support disables support for
7443	 * advertising.
7444	 */
7445	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7446		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7447				       MGMT_STATUS_NOT_SUPPORTED);
7448
7449	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7450		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7451				       MGMT_STATUS_INVALID_PARAMS);
7452
7453	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7454		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7455				       MGMT_STATUS_INVALID_PARAMS);
7456
7457	flags = __le32_to_cpu(cp->flags);
7458	timeout = __le16_to_cpu(cp->timeout);
7459	duration = __le16_to_cpu(cp->duration);
7460
7461	/* The current implementation only supports a subset of the specified
7462	 * flags. Also need to check mutual exclusiveness of sec flags.
7463	 */
7464	supported_flags = get_supported_adv_flags(hdev);
7465	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
7466	if (flags & ~supported_flags ||
7467	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7468		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7469				       MGMT_STATUS_INVALID_PARAMS);
7470
7471	hci_dev_lock(hdev);
7472
7473	if (timeout && !hdev_is_powered(hdev)) {
7474		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7475				      MGMT_STATUS_REJECTED);
7476		goto unlock;
7477	}
7478
7479	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7480	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7481	    pending_find(MGMT_OP_SET_LE, hdev)) {
7482		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7483				      MGMT_STATUS_BUSY);
7484		goto unlock;
7485	}
7486
7487	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7488	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7489			       cp->scan_rsp_len, false)) {
7490		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7491				      MGMT_STATUS_INVALID_PARAMS);
7492		goto unlock;
7493	}
7494
7495	err = hci_add_adv_instance(hdev, cp->instance, flags,
7496				   cp->adv_data_len, cp->data,
7497				   cp->scan_rsp_len,
7498				   cp->data + cp->adv_data_len,
7499				   timeout, duration);
7500	if (err < 0) {
7501		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7502				      MGMT_STATUS_FAILED);
7503		goto unlock;
7504	}
7505
7506	/* Only trigger an advertising added event if a new instance was
7507	 * actually added.
7508	 */
7509	if (hdev->adv_instance_cnt > prev_instance_cnt)
7510		mgmt_advertising_added(sk, hdev, cp->instance);
7511
7512	if (hdev->cur_adv_instance == cp->instance) {
7513		/* If the currently advertised instance is being changed then
7514		 * cancel the current advertising and schedule the next
7515		 * instance. If there is only one instance then the overridden
7516		 * advertising data will be visible right away.
7517		 */
7518		cancel_adv_timeout(hdev);
7519
7520		next_instance = hci_get_next_instance(hdev, cp->instance);
7521		if (next_instance)
7522			schedule_instance = next_instance->instance;
7523	} else if (!hdev->adv_instance_timeout) {
7524		/* Immediately advertise the new instance if no other
7525		 * instance is currently being advertised.
7526		 */
7527		schedule_instance = cp->instance;
7528	}
7529
7530	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
7531	 * there is no instance to be advertised then we have no HCI
7532	 * communication to make. Simply return.
7533	 */
7534	if (!hdev_is_powered(hdev) ||
7535	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7536	    !schedule_instance) {
7537		rp.instance = cp->instance;
7538		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7539					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7540		goto unlock;
7541	}
7542
7543	/* We're good to go, update advertising data, parameters, and start
7544	 * advertising.
7545	 */
7546	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7547			       data_len);
7548	if (!cmd) {
7549		err = -ENOMEM;
7550		goto unlock;
7551	}
7552
7553	hci_req_init(&req, hdev);
7554
7555	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7556
7557	if (!err)
7558		err = hci_req_run(&req, add_advertising_complete);
7559
7560	if (err < 0) {
7561		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7562				      MGMT_STATUS_FAILED);
7563		mgmt_pending_remove(cmd);
7564	}
7565
7566unlock:
7567	hci_dev_unlock(hdev);
7568
7569	return err;
7570}
7571
7572static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7573					u16 opcode)
7574{
7575	struct mgmt_pending_cmd *cmd;
7576	struct mgmt_cp_remove_advertising *cp;
7577	struct mgmt_rp_remove_advertising rp;
7578
7579	bt_dev_dbg(hdev, "status %d", status);
7580
7581	hci_dev_lock(hdev);
7582
7583	/* A failure status here only means that we failed to disable
7584	 * advertising. Otherwise, the advertising instance has been removed,
7585	 * so report success.
7586	 */
7587	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7588	if (!cmd)
7589		goto unlock;
7590
7591	cp = cmd->param;
7592	rp.instance = cp->instance;
7593
7594	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7595			  &rp, sizeof(rp));
7596	mgmt_pending_remove(cmd);
7597
7598unlock:
7599	hci_dev_unlock(hdev);
7600}
7601
7602static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7603			      void *data, u16 data_len)
7604{
7605	struct mgmt_cp_remove_advertising *cp = data;
7606	struct mgmt_rp_remove_advertising rp;
7607	struct mgmt_pending_cmd *cmd;
7608	struct hci_request req;
7609	int err;
7610
7611	bt_dev_dbg(hdev, "sock %p", sk);
7612
7613	/* Enabling the experimental LL Privay support disables support for
7614	 * advertising.
7615	 */
7616	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7617		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7618				       MGMT_STATUS_NOT_SUPPORTED);
7619
7620	hci_dev_lock(hdev);
7621
7622	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7623		err = mgmt_cmd_status(sk, hdev->id,
7624				      MGMT_OP_REMOVE_ADVERTISING,
7625				      MGMT_STATUS_INVALID_PARAMS);
7626		goto unlock;
7627	}
7628
7629	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7630	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7631	    pending_find(MGMT_OP_SET_LE, hdev)) {
7632		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7633				      MGMT_STATUS_BUSY);
7634		goto unlock;
7635	}
7636
7637	if (list_empty(&hdev->adv_instances)) {
7638		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7639				      MGMT_STATUS_INVALID_PARAMS);
7640		goto unlock;
7641	}
7642
7643	hci_req_init(&req, hdev);
7644
7645	/* If we use extended advertising, instance is disabled and removed */
7646	if (ext_adv_capable(hdev)) {
7647		__hci_req_disable_ext_adv_instance(&req, cp->instance);
7648		__hci_req_remove_ext_adv_instance(&req, cp->instance);
7649	}
7650
7651	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
7652
7653	if (list_empty(&hdev->adv_instances))
7654		__hci_req_disable_advertising(&req);
7655
7656	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
7657	 * flag is set or the device isn't powered then we have no HCI
7658	 * communication to make. Simply return.
7659	 */
7660	if (skb_queue_empty(&req.cmd_q) ||
7661	    !hdev_is_powered(hdev) ||
7662	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7663		hci_req_purge(&req);
7664		rp.instance = cp->instance;
7665		err = mgmt_cmd_complete(sk, hdev->id,
7666					MGMT_OP_REMOVE_ADVERTISING,
7667					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7668		goto unlock;
7669	}
7670
7671	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7672			       data_len);
7673	if (!cmd) {
7674		err = -ENOMEM;
7675		goto unlock;
7676	}
7677
7678	err = hci_req_run(&req, remove_advertising_complete);
7679	if (err < 0)
7680		mgmt_pending_remove(cmd);
7681
7682unlock:
7683	hci_dev_unlock(hdev);
7684
7685	return err;
7686}
7687
7688static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7689			     void *data, u16 data_len)
7690{
7691	struct mgmt_cp_get_adv_size_info *cp = data;
7692	struct mgmt_rp_get_adv_size_info rp;
7693	u32 flags, supported_flags;
7694	int err;
7695
7696	bt_dev_dbg(hdev, "sock %p", sk);
7697
7698	if (!lmp_le_capable(hdev))
7699		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7700				       MGMT_STATUS_REJECTED);
7701
7702	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7703		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7704				       MGMT_STATUS_INVALID_PARAMS);
7705
7706	flags = __le32_to_cpu(cp->flags);
7707
7708	/* The current implementation only supports a subset of the specified
7709	 * flags.
7710	 */
7711	supported_flags = get_supported_adv_flags(hdev);
7712	if (flags & ~supported_flags)
7713		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7714				       MGMT_STATUS_INVALID_PARAMS);
7715
7716	rp.instance = cp->instance;
7717	rp.flags = cp->flags;
7718	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7719	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7720
7721	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7722				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7723
7724	return err;
7725}
7726
/* Dispatch table for mgmt commands, indexed by mgmt opcode.  Each entry
 * carries the handler, the expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and flags: HCI_MGMT_NO_HDEV for commands that
 * take no controller index, HCI_MGMT_UNTRUSTED for commands allowed on
 * untrusted sockets, and HCI_MGMT_UNCONFIGURED for commands valid on
 * not-yet-configured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7847
7848void mgmt_index_added(struct hci_dev *hdev)
7849{
7850	struct mgmt_ev_ext_index ev;
7851
7852	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7853		return;
7854
7855	switch (hdev->dev_type) {
7856	case HCI_PRIMARY:
7857		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7858			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7859					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7860			ev.type = 0x01;
7861		} else {
7862			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7863					 HCI_MGMT_INDEX_EVENTS);
7864			ev.type = 0x00;
7865		}
7866		break;
7867	case HCI_AMP:
7868		ev.type = 0x02;
7869		break;
7870	default:
7871		return;
7872	}
7873
7874	ev.bus = hdev->bus;
7875
7876	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7877			 HCI_MGMT_EXT_INDEX_EVENTS);
7878}
7879
/* Announce an unregistered controller to mgmt listeners.  Mirrors
 * mgmt_index_added(), but first fails all commands still pending on the
 * controller with MGMT_STATUS_INVALID_INDEX.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every pending command (opcode 0 matches all). */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown controller types produce no index events. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7914
7915/* This function requires the caller holds hdev->lock */
7916static void restart_le_actions(struct hci_dev *hdev)
7917{
7918	struct hci_conn_params *p;
7919
7920	list_for_each_entry(p, &hdev->le_conn_params, list) {
7921		/* Needed for AUTO_OFF case where might not "really"
7922		 * have been powered off.
7923		 */
7924		list_del_init(&p->action);
7925
7926		switch (p->auto_connect) {
7927		case HCI_AUTO_CONN_DIRECT:
7928		case HCI_AUTO_CONN_ALWAYS:
7929			list_add(&p->action, &hdev->pend_le_conns);
7930			break;
7931		case HCI_AUTO_CONN_REPORT:
7932			list_add(&p->action, &hdev->pend_le_reports);
7933			break;
7934		default:
7935			break;
7936		}
7937	}
7938}
7939
7940void mgmt_power_on(struct hci_dev *hdev, int err)
7941{
7942	struct cmd_lookup match = { NULL, hdev };
7943
7944	bt_dev_dbg(hdev, "err %d", err);
7945
7946	hci_dev_lock(hdev);
7947
7948	if (!err) {
7949		restart_le_actions(hdev);
7950		hci_update_background_scan(hdev);
7951	}
7952
7953	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7954
7955	new_settings(hdev, match.sk);
7956
7957	if (match.sk)
7958		sock_put(match.sk);
7959
7960	hci_dev_unlock(hdev);
7961}
7962
/* Finish powering the controller off: answer pending SET_POWERED commands,
 * fail every other pending command, broadcast a zero class of device if one
 * was set, and announce the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 matches all). */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7996
7997void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7998{
7999	struct mgmt_pending_cmd *cmd;
8000	u8 status;
8001
8002	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8003	if (!cmd)
8004		return;
8005
8006	if (err == -ERFKILL)
8007		status = MGMT_STATUS_RFKILLED;
8008	else
8009		status = MGMT_STATUS_FAILED;
8010
8011	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8012
8013	mgmt_pending_remove(cmd);
8014}
8015
8016void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8017		       bool persistent)
8018{
8019	struct mgmt_ev_new_link_key ev;
8020
8021	memset(&ev, 0, sizeof(ev));
8022
8023	ev.store_hint = persistent;
8024	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8025	ev.key.addr.type = BDADDR_BREDR;
8026	ev.key.type = key->type;
8027	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8028	ev.key.pin_len = key->pin_len;
8029
8030	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8031}
8032
8033static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8034{
8035	switch (ltk->type) {
8036	case SMP_LTK:
8037	case SMP_LTK_SLAVE:
8038		if (ltk->authenticated)
8039			return MGMT_LTK_AUTHENTICATED;
8040		return MGMT_LTK_UNAUTHENTICATED;
8041	case SMP_LTK_P256:
8042		if (ltk->authenticated)
8043			return MGMT_LTK_P256_AUTH;
8044		return MGMT_LTK_P256_UNAUTH;
8045	case SMP_LTK_P256_DEBUG:
8046		return MGMT_LTK_P256_DEBUG;
8047	}
8048
8049	return MGMT_LTK_UNAUTHENTICATED;
8050}
8051
/* Emit an MGMT_EV_NEW_LONG_TERM_KEY event for a freshly distributed LTK.
 * The store hint is suppressed for non-identity random addresses since the
 * peer's address will change on the next connection.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the master role. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8094
8095void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8096{
8097	struct mgmt_ev_new_irk ev;
8098
8099	memset(&ev, 0, sizeof(ev));
8100
8101	ev.store_hint = persistent;
8102
8103	bacpy(&ev.rpa, &irk->rpa);
8104	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8105	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8106	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8107
8108	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8109}
8110
/* Emit an MGMT_EV_NEW_CSRK event for a freshly distributed signature
 * resolving key.  As with LTKs, the store hint is suppressed for
 * non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8140
8141void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8142			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8143			 u16 max_interval, u16 latency, u16 timeout)
8144{
8145	struct mgmt_ev_new_conn_param ev;
8146
8147	if (!hci_is_identity_address(bdaddr, bdaddr_type))
8148		return;
8149
8150	memset(&ev, 0, sizeof(ev));
8151	bacpy(&ev.addr.bdaddr, bdaddr);
8152	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8153	ev.store_hint = store_hint;
8154	ev.min_interval = cpu_to_le16(min_interval);
8155	ev.max_interval = cpu_to_le16(max_interval);
8156	ev.latency = cpu_to_le16(latency);
8157	ev.timeout = cpu_to_le16(timeout);
8158
8159	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8160}
8161
/* Emit an MGMT_EV_DEVICE_CONNECTED event, attaching either the LE
 * advertising data the peer sent or (for BR/EDR) the remote name and class
 * of device as EIR fields.
 *
 * NOTE(review): the 512-byte stack buffer is assumed to be large enough for
 * the fixed event header plus le_adv_data_len / name_len from the caller —
 * confirm against the callers' bounds.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if one has been set. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8198
8199static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8200{
8201	struct sock **sk = data;
8202
8203	cmd->cmd_complete(cmd, 0);
8204
8205	*sk = cmd->sk;
8206	sock_hold(*sk);
8207
8208	mgmt_pending_remove(cmd);
8209}
8210
8211static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8212{
8213	struct hci_dev *hdev = data;
8214	struct mgmt_cp_unpair_device *cp = cmd->param;
8215
8216	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8217
8218	cmd->cmd_complete(cmd, 0);
8219	mgmt_pending_remove(cmd);
8220}
8221
8222bool mgmt_powering_down(struct hci_dev *hdev)
8223{
8224	struct mgmt_pending_cmd *cmd;
8225	struct mgmt_mode *cp;
8226
8227	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8228	if (!cmd)
8229		return false;
8230
8231	cp = cmd->param;
8232	if (!cp->val)
8233		return true;
8234
8235	return false;
8236}
8237
/* Emit an MGMT_EV_DEVICE_DISCONNECTED event and finish any pending
 * disconnect/unpair commands for this connection.  Also kicks the deferred
 * power-off once the last connection of a powering-down controller goes
 * away.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if the connection was never reported to userspace. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() hands back the socket with a reference held so
	 * the command's originator can be excluded from the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8273
8274void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8275			    u8 link_type, u8 addr_type, u8 status)
8276{
8277	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8278	struct mgmt_cp_disconnect *cp;
8279	struct mgmt_pending_cmd *cmd;
8280
8281	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8282			     hdev);
8283
8284	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8285	if (!cmd)
8286		return;
8287
8288	cp = cmd->param;
8289
8290	if (bacmp(bdaddr, &cp->addr.bdaddr))
8291		return;
8292
8293	if (cp->addr.type != bdaddr_type)
8294		return;
8295
8296	cmd->cmd_complete(cmd, mgmt_status(status));
8297	mgmt_pending_remove(cmd);
8298}
8299
8300void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8301			 u8 addr_type, u8 status)
8302{
8303	struct mgmt_ev_connect_failed ev;
8304
8305	/* The connection is still in hci_conn_hash so test for 1
8306	 * instead of 0 to know if this is the last one.
8307	 */
8308	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8309		cancel_delayed_work(&hdev->power_off);
8310		queue_work(hdev->req_workqueue, &hdev->power_off.work);
8311	}
8312
8313	bacpy(&ev.addr.bdaddr, bdaddr);
8314	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8315	ev.status = mgmt_status(status);
8316
8317	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8318}
8319
8320void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8321{
8322	struct mgmt_ev_pin_code_request ev;
8323
8324	bacpy(&ev.addr.bdaddr, bdaddr);
8325	ev.addr.type = BDADDR_BREDR;
8326	ev.secure = secure;
8327
8328	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8329}
8330
8331void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8332				  u8 status)
8333{
8334	struct mgmt_pending_cmd *cmd;
8335
8336	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8337	if (!cmd)
8338		return;
8339
8340	cmd->cmd_complete(cmd, mgmt_status(status));
8341	mgmt_pending_remove(cmd);
8342}
8343
8344void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8345				      u8 status)
8346{
8347	struct mgmt_pending_cmd *cmd;
8348
8349	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8350	if (!cmd)
8351		return;
8352
8353	cmd->cmd_complete(cmd, mgmt_status(status));
8354	mgmt_pending_remove(cmd);
8355}
8356
8357int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8358			      u8 link_type, u8 addr_type, u32 value,
8359			      u8 confirm_hint)
8360{
8361	struct mgmt_ev_user_confirm_request ev;
8362
8363	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8364
8365	bacpy(&ev.addr.bdaddr, bdaddr);
8366	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8367	ev.confirm_hint = confirm_hint;
8368	ev.value = cpu_to_le32(value);
8369
8370	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8371			  NULL);
8372}
8373
8374int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8375			      u8 link_type, u8 addr_type)
8376{
8377	struct mgmt_ev_user_passkey_request ev;
8378
8379	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8380
8381	bacpy(&ev.addr.bdaddr, bdaddr);
8382	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8383
8384	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8385			  NULL);
8386}
8387
8388static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8389				      u8 link_type, u8 addr_type, u8 status,
8390				      u8 opcode)
8391{
8392	struct mgmt_pending_cmd *cmd;
8393
8394	cmd = pending_find(opcode, hdev);
8395	if (!cmd)
8396		return -ENOENT;
8397
8398	cmd->cmd_complete(cmd, mgmt_status(status));
8399	mgmt_pending_remove(cmd);
8400
8401	return 0;
8402}
8403
/* Completion hook for a User Confirmation Reply; thin wrapper around
 * user_pairing_resp_complete().
 */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8410
/* Completion hook for a User Confirmation Negative Reply; thin wrapper
 * around user_pairing_resp_complete().
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8418
/* Completion hook for a User Passkey Reply; thin wrapper around
 * user_pairing_resp_complete().
 */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8425
/* Completion hook for a User Passkey Negative Reply; thin wrapper
 * around user_pairing_resp_complete().
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8433
8434int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8435			     u8 link_type, u8 addr_type, u32 passkey,
8436			     u8 entered)
8437{
8438	struct mgmt_ev_passkey_notify ev;
8439
8440	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8441
8442	bacpy(&ev.addr.bdaddr, bdaddr);
8443	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8444	ev.passkey = __cpu_to_le32(passkey);
8445	ev.entered = entered;
8446
8447	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8448}
8449
/* Report a failed authentication for @conn to userspace.
 *
 * Sends MGMT_EV_AUTH_FAILED and, if a pairing command is pending for
 * this connection, completes it with the translated status. The event
 * is emitted before the pending command is removed so that cmd->sk is
 * still valid when used to exclude the initiating socket.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);	/* HCI -> mgmt status code */

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The socket that started the pairing gets the command response
	 * instead of this event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8470
/* Handle completion of the controller command behind Set Link Security.
 *
 * On failure all pending MGMT_OP_SET_LINK_SECURITY commands are
 * answered with the translated error. On success HCI_LINK_SECURITY is
 * synced with the controller's HCI_AUTH state, pending commands get
 * the current settings, and a New Settings event is emitted if the
 * flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth-enable state in the mgmt flag;
	 * the test_and_* variants report whether a transition happened.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the first socket it sees */
	if (match.sk)
		sock_put(match.sk);
}
8497
/* Queue a Write Extended Inquiry Response command that clears the EIR
 * data, and wipe the cached copy in hdev->eir. No-op on controllers
 * without extended inquiry support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
8512
/* Handle completion of the controller command behind Set SSP.
 *
 * On error, roll back HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED
 * flag) if enabling was attempted, then fail all pending SET_SSP
 * commands. On success, sync the flags with the requested state,
 * answer pending commands with the new settings, emit New Settings on
 * an actual change, and finally update or clear the EIR data to match
 * the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: undo the optimistically set flag and
		 * tell userspace about the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; report a change
		 * if either flag was previously set.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the first socket it sees */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8565
8566static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8567{
8568	struct cmd_lookup *match = data;
8569
8570	if (match->sk == NULL) {
8571		match->sk = cmd->sk;
8572		sock_hold(match->sk);
8573	}
8574}
8575
/* Handle completion of a Class of Device update triggered by a Set
 * Device Class, Add UUID or Remove UUID command. On success, broadcast
 * the new class and refresh extended controller info. The first
 * pending command's socket (if any) is excluded from the broadcast.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* sk_lookup took a reference on the first socket it found */
	if (match.sk)
		sock_put(match.sk);
}
8594
/* Handle completion of a local name change.
 *
 * With a pending Set Local Name command the event is broadcast to all
 * sockets except the initiator. Without one the change came from the
 * controller/HCI side, so the name is cached in hdev->dev_name; in
 * that case the event is suppressed while a power-on is in progress.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8622
8623static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8624{
8625	int i;
8626
8627	for (i = 0; i < uuid_count; i++) {
8628		if (!memcmp(uuid, uuids[i], 16))
8629			return true;
8630	}
8631
8632	return false;
8633}
8634
/* Walk the EIR/advertising data in @eir (length @eir_len) and return
 * true if any advertised service UUID matches one of the @uuid_count
 * 128-bit UUIDs in @uuids. 16- and 32-bit UUIDs are expanded into the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + data */
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the field type, data starts at eir[2] */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				/* Expand into base UUID at octets 12-13 */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				/* Expand into base UUID at octets 12-15 */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8689
/* Schedule a restart of the ongoing LE scan so that devices suppressed
 * by the controller's duplicate filter get reported again.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would fall beyond the scheduled end
	 * of the current scan window anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8704
/* Apply the Start Service Discovery result filter (RSSI threshold and
 * UUID list) to a found device. Returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8749
/* Report a discovered device to userspace as a Device Found event.
 *
 * Combines EIR/advertising data, an optional Class of Device field and
 * optional scan response data into one event buffer, after applying
 * the discovery filters (service filter, limited discovery).
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Only append a synthesized CoD field if the data doesn't
	 * already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8834
8835void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8836		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8837{
8838	struct mgmt_ev_device_found *ev;
8839	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8840	u16 eir_len;
8841
8842	ev = (struct mgmt_ev_device_found *) buf;
8843
8844	memset(buf, 0, sizeof(buf));
8845
8846	bacpy(&ev->addr.bdaddr, bdaddr);
8847	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8848	ev->rssi = rssi;
8849
8850	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8851				  name_len);
8852
8853	ev->eir_len = cpu_to_le16(eir_len);
8854
8855	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8856}
8857
8858void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8859{
8860	struct mgmt_ev_discovering ev;
8861
8862	bt_dev_dbg(hdev, "discovering %u", discovering);
8863
8864	memset(&ev, 0, sizeof(ev));
8865	ev.type = hdev->discovery.type;
8866	ev.discovering = discovering;
8867
8868	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8869}
8870
/* Management channel registered with the HCI socket layer; routes all
 * HCI_CHANNEL_CONTROL commands to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8877
/* Register the management interface channel. Returns 0 on success or a
 * negative error from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8882
/* Unregister the management interface channel on module unload. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}