/* net/bluetooth/mgmt.c - Bluetooth HCI Management interface (Linux v6.8) */
    1/*
    2   BlueZ - Bluetooth protocol stack for Linux
    3
    4   Copyright (C) 2010  Nokia Corporation
    5   Copyright (C) 2011-2012 Intel Corporation
    6
    7   This program is free software; you can redistribute it and/or modify
    8   it under the terms of the GNU General Public License version 2 as
    9   published by the Free Software Foundation;
   10
   11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   19
   20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   22   SOFTWARE IS DISCLAIMED.
   23*/
   24
   25/* Bluetooth HCI Management interface */
   26
 
 
   27#include <linux/module.h>
   28#include <asm/unaligned.h>
   29
   30#include <net/bluetooth/bluetooth.h>
   31#include <net/bluetooth/hci_core.h>
   32#include <net/bluetooth/hci_sock.h>
   33#include <net/bluetooth/l2cap.h>
   34#include <net/bluetooth/mgmt.h>
 
   35
   36#include "hci_request.h"
   37#include "smp.h"
   38#include "mgmt_util.h"
   39#include "mgmt_config.h"
   40#include "msft.h"
   41#include "eir.h"
   42#include "aosp.h"
   43
/* Version and revision of the Management (mgmt) protocol implemented
 * here; reported to userspace via MGMT_OP_READ_VERSION.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	22
   46
/* Opcodes that a trusted (privileged) management socket may invoke;
 * reported by read_commands() below.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
  137
/* Events that may be delivered to a trusted management socket;
 * reported by read_commands() below.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
  184
/* Read-only subset of opcodes permitted on untrusted (unprivileged)
 * management sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
  197
/* Events that may be delivered to untrusted management sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
  212
/* 2 second timeout, in jiffies.
 * NOTE(review): presumably used to schedule the service_cache delayed
 * work (see service_cache_off()) — usage site is outside this chunk,
 * confirm there.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 bytes of zeros, used to detect all-zero (invalid) keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
 
 
 
 
 
 
 
 
 
  217
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see mgmt_status() below);
 * the comment on each entry names the HCI status it translates.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
  285
  286static u8 mgmt_errno_status(int err)
  287{
  288	switch (err) {
  289	case 0:
  290		return MGMT_STATUS_SUCCESS;
  291	case -EPERM:
  292		return MGMT_STATUS_REJECTED;
  293	case -EINVAL:
  294		return MGMT_STATUS_INVALID_PARAMS;
  295	case -EOPNOTSUPP:
  296		return MGMT_STATUS_NOT_SUPPORTED;
  297	case -EBUSY:
  298		return MGMT_STATUS_BUSY;
  299	case -ETIMEDOUT:
  300		return MGMT_STATUS_AUTH_FAILED;
  301	case -ENOMEM:
  302		return MGMT_STATUS_NO_RESOURCES;
  303	case -EISCONN:
  304		return MGMT_STATUS_ALREADY_CONNECTED;
  305	case -ENOTCONN:
  306		return MGMT_STATUS_DISCONNECTED;
  307	}
  308
  309	return MGMT_STATUS_FAILED;
  310}
  311
  312static u8 mgmt_status(int err)
  313{
  314	if (err < 0)
  315		return mgmt_errno_status(err);
 
 
  316
  317	if (err < ARRAY_SIZE(mgmt_status_table))
  318		return mgmt_status_table[err];
  319
  320	return MGMT_STATUS_FAILED;
  321}
 
  322
/* Send @event on the MGMT control channel to all sockets that have
 * @flag set (no socket is skipped).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
  329
/* Send @event on the control channel to sockets with @flag set,
 * excluding @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
  336
/* Send @event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
  343
/* Like mgmt_event() but takes a pre-built skb instead of a data/len
 * pair; delivered to trusted sockets only, excluding @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
  349
  350static u8 le_addr_type(u8 mgmt_addr_type)
  351{
  352	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
  353		return ADDR_LE_DEV_PUBLIC;
  354	else
  355		return ADDR_LE_DEV_RANDOM;
  356}
  357
  358void mgmt_fill_version_info(void *ver)
 
  359{
  360	struct mgmt_rp_read_version *rp = ver;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  361
  362	rp->version = MGMT_VERSION;
  363	rp->revision = cpu_to_le16(MGMT_REVISION);
 
 
 
 
 
 
  364}
  365
/* MGMT_OP_READ_VERSION handler: reply with the protocol version and
 * revision.  Takes no parameters and cannot fail with a MGMT error.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
  378
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * command opcodes followed by the list of supported event opcodes.
 * Trusted sockets see the full tables; untrusted sockets see only the
 * read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* Variable-length reply: fixed header plus one u16 per opcode */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes may land at unaligned offsets in the packed reply,
	 * hence put_unaligned_le16() rather than direct stores.
	 */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
  430
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.  Uses a count-allocate-fill pattern
 * under hci_dev_list_lock; the fill pass applies extra filters, so the
 * reply may contain fewer entries than first counted and rp_len is
 * recomputed afterwards.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, skipping devices still being set
	 * up, being configured, or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length in case the fill pass skipped devices */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
  490
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only *unconfigured* primary controllers (the filter on the
 * HCI_UNCONFIGURED flag is inverted).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, skipping devices in setup/config
	 * or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length in case the fill pass skipped devices */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
  550
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all primary and AMP
 * controllers.  Entry types: 0x00 = configured primary, 0x01 =
 * unconfigured primary, 0x02 = AMP.  As a side effect, switches the
 * calling socket from the legacy index events to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries, skipping devices in setup/config
	 * or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
  624
  625static bool is_configured(struct hci_dev *hdev)
  626{
  627	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
  628	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
  629		return false;
  630
  631	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
  632	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
  633	    !bacmp(&hdev->public_addr, BDADDR_ANY))
  634		return false;
  635
  636	return true;
  637}
  638
  639static __le32 get_missing_options(struct hci_dev *hdev)
  640{
  641	u32 options = 0;
  642
  643	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
  644	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
  645		options |= MGMT_OPTION_EXTERNAL_CONFIG;
  646
  647	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
  648	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
  649	    !bacmp(&hdev->public_addr, BDADDR_ANY))
  650		options |= MGMT_OPTION_PUBLIC_ADDRESS;
  651
  652	return cpu_to_le32(options);
  653}
  654
/* Broadcast a New Configuration Options event with the currently
 * missing options, to sockets subscribed to option events, skipping
 * @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
  662
/* Complete @opcode successfully with the current missing-options mask
 * as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
  670
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer ID,
 * the configuration options this controller supports, and those still
 * missing.  The reply is assembled under hdev lock.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the
	 * driver provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
  698
/* Build the bitmask of PHYs this controller is capable of, derived
 * from its LMP/LE feature bits: BR/EDR packet/slot combinations first,
 * then the LE 1M/2M/Coded PHYs.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* EDR 3M implies EDR 2M support, hence nested */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
  750
/* Build the bitmask of PHYs currently selected.  For BR/EDR the EDR
 * bits in hdev->pkt_type are "do not use" flags, so a *cleared* HCI_xDHy
 * bit means that EDR packet type is selected; for LE the default TX/RX
 * PHY preference masks are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic-rate 1-slot is always selected on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* Note the inverted sense: EDR bits disable */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
  813
  814static u32 get_configurable_phys(struct hci_dev *hdev)
  815{
  816	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
  817		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
  818}
  819
/* Build the bitmask of settings this controller can support, based on
 * its BR/EDR and LE capabilities, quirks, and driver callbacks.
 * Compare get_current_settings() for the values currently in effect.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always-available settings regardless of radio capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires interlaced scan (>= 1.2) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs CONFIG_BT_HS */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible with an external-config quirk or a
	 * driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
  872
/* Build the bitmask of settings currently in effect, derived from the
 * hdev flags and ISO capabilities.  Compare get_supported_settings()
 * for what the controller could support.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	/* ISO settings reflect controller capability, not a flag */
	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
 
 
 
 
 
  955
/* Look up a pending mgmt command for @opcode on the control channel;
 * returns NULL when none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
  960
/* Return the LE advertising discoverability flags (LE_AD_GENERAL,
 * LE_AD_LIMITED, or 0 for non-discoverable) for @hdev.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* Pending value: 0x01 = general, 0x02 = limited */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
 
 
  984
  985bool mgmt_get_connectable(struct hci_dev *hdev)
  986{
  987	struct mgmt_pending_cmd *cmd;
  988
  989	/* If there's a pending mgmt command the flag will not yet have
  990	 * it's final value, so check for this first.
  991	 */
  992	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
  993	if (cmd) {
  994		struct mgmt_mode *cp = cmd->param;
 
 
  995
  996		return cp->val;
 
  997	}
  998
  999	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
 1000}
 1001
/* hci_cmd_sync callback: refresh the EIR data and the class of device
 * once the service cache has been switched off. Always succeeds.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
 
 1009
 1010static void service_cache_off(struct work_struct *work)
 1011{
 1012	struct hci_dev *hdev = container_of(work, struct hci_dev,
 1013					    service_cache.work);
 1014
 1015	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
 1016		return;
 1017
 1018	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
 1019}
 
 
 
 1020
 1021static int rpa_expired_sync(struct hci_dev *hdev, void *data)
 1022{
 1023	/* The generation of a new RPA and programming it into the
 1024	 * controller happens in the hci_req_enable_advertising()
 1025	 * function.
 1026	 */
 1027	if (ext_adv_capable(hdev))
 1028		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
 1029	else
 1030		return hci_enable_advertising_sync(hdev);
 1031}
 1032
 1033static void rpa_expired(struct work_struct *work)
 1034{
 1035	struct hci_dev *hdev = container_of(work, struct hci_dev,
 1036					    rpa_expired.work);
 
 1037
 1038	bt_dev_dbg(hdev, "");
 
 1039
 1040	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 
 1041
 1042	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
 1043		return;
 1044
 1045	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
 1046}
 
 
 1047
 1048static int set_discoverable_sync(struct hci_dev *hdev, void *data);
 
 
 
 1049
/* Delayed work: the discoverable timeout fired. Drop both
 * discoverable flags, push the new scan state to the controller and
 * notify mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Flags are cleared above before queueing so the sync callback
	 * observes the non-discoverable state.
	 */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
 
 
 
 
 1074
 1075static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
 
 1076
 1077static void mesh_send_complete(struct hci_dev *hdev,
 1078			       struct mgmt_mesh_tx *mesh_tx, bool silent)
 1079{
 1080	u8 handle = mesh_tx->handle;
 
 1081
 1082	if (!silent)
 1083		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
 1084			   sizeof(handle), NULL);
 1085
 1086	mgmt_mesh_remove(mesh_tx);
 1087}
 1088
 1089static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
 1090{
 1091	struct mgmt_mesh_tx *mesh_tx;
 
 1092
 1093	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
 1094	hci_disable_advertising_sync(hdev);
 1095	mesh_tx = mgmt_mesh_next(hdev, NULL);
 1096
 1097	if (mesh_tx)
 1098		mesh_send_complete(hdev, mesh_tx, false);
 1099
 1100	return 0;
 1101}
 1102
 1103static int mesh_send_sync(struct hci_dev *hdev, void *data);
 1104static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
 1105static void mesh_next(struct hci_dev *hdev, void *data, int err)
 1106{
 1107	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
 1108
 1109	if (!mesh_tx)
 1110		return;
 1111
 1112	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
 1113				 mesh_send_start_complete);
 
 1114
 1115	if (err < 0)
 1116		mesh_send_complete(hdev, mesh_tx, false);
 1117	else
 1118		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
 1119}
 1120
 1121static void mesh_send_done(struct work_struct *work)
 1122{
 1123	struct hci_dev *hdev = container_of(work, struct hci_dev,
 1124					    mesh_send_done.work);
 1125
 1126	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
 1127		return;
 1128
 1129	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
 
 
 
 
 
 1130}
 1131
/* One-time switch of a controller into mgmt mode: set up the delayed
 * work items used by the mgmt interface and flip the bondable
 * default. Idempotent — a second call is a no-op.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	/* Set last so the checks above stay one-shot. */
	hci_dev_set_flag(hdev, HCI_MGMT);
}
 1153
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and both device names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot everything under the dev lock so the reply is
	 * internally consistent.
	 */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
 1183
 1184static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
 1185{
 1186	u16 eir_len = 0;
 1187	size_t name_len;
 1188
 1189	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 1190		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
 1191					  hdev->dev_class, 3);
 1192
 1193	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 1194		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
 1195					  hdev->appearance);
 1196
 1197	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
 1198	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
 1199				  hdev->dev_name, name_len);
 1200
 1201	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
 1202	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
 1203				  hdev->short_name, name_len);
 1204
 1205	return eir_len;
 1206}
 1207
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but with class,
 * appearance and names carried as variable-length EIR data appended
 * after the fixed-size reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* rp is laid out at the start of buf; the EIR data fills the
	 * flexible tail (rp->eir) inside the same 512-byte buffer.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
 1247
 1248static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
 1249{
 1250	char buf[512];
 1251	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
 1252	u16 eir_len;
 1253
 1254	memset(buf, 0, sizeof(buf));
 
 1255
 1256	eir_len = append_eir_data_to_buf(hdev, ev->eir);
 1257	ev->eir_len = cpu_to_le16(eir_len);
 1258
 1259	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
 1260				  sizeof(*ev) + eir_len,
 1261				  HCI_MGMT_EXT_INFO_EVENTS, skip);
 1262}
 1263
 1264static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
 1265{
 1266	__le32 settings = cpu_to_le32(get_current_settings(hdev));
 1267
 1268	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
 1269				 sizeof(settings));
 1270}
 1271
 1272void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
 
 
 1273{
 1274	struct mgmt_ev_advertising_added ev;
 1275
 1276	ev.instance = instance;
 1277
 1278	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
 1279}
 1280
 1281void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
 1282			      u8 instance)
 1283{
 1284	struct mgmt_ev_advertising_removed ev;
 1285
 1286	ev.instance = instance;
 1287
 1288	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
 1289}
 1290
 1291static void cancel_adv_timeout(struct hci_dev *hdev)
 1292{
 1293	if (hdev->adv_instance_timeout) {
 1294		hdev->adv_instance_timeout = 0;
 1295		cancel_delayed_work(&hdev->adv_instance_expire);
 1296	}
 1297}
 1298
 1299/* This function requires the caller holds hdev->lock */
 1300static void restart_le_actions(struct hci_dev *hdev)
 1301{
 1302	struct hci_conn_params *p;
 1303
 1304	list_for_each_entry(p, &hdev->le_conn_params, list) {
 1305		/* Needed for AUTO_OFF case where might not "really"
 1306		 * have been powered off.
 1307		 */
 1308		hci_pend_le_list_del_init(p);
 1309
 1310		switch (p->auto_connect) {
 1311		case HCI_AUTO_CONN_DIRECT:
 1312		case HCI_AUTO_CONN_ALWAYS:
 1313			hci_pend_le_list_add(p, &hdev->pend_le_conns);
 1314			break;
 1315		case HCI_AUTO_CONN_REPORT:
 1316			hci_pend_le_list_add(p, &hdev->pend_le_reports);
 1317			break;
 1318		default:
 1319			break;
 1320		}
 1321	}
 1322}
 1323
 1324static int new_settings(struct hci_dev *hdev, struct sock *skip)
 1325{
 1326	__le32 ev = cpu_to_le32(get_current_settings(hdev));
 1327
 1328	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
 1329				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
 1330}
 1331
/* Completion handler for Set Powered: answer the pending command and,
 * on a successful power-on, restore LE auto-connect actions and
 * announce the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
 1367
 1368static int set_powered_sync(struct hci_dev *hdev, void *data)
 1369{
 1370	struct mgmt_pending_cmd *cmd = data;
 1371	struct mgmt_mode *cp = cmd->param;
 1372
 1373	BT_DBG("%s", hdev->name);
 1374
 1375	return hci_set_powered_sync(hdev, cp->val);
 
 1376}
 1377
/* MGMT_OP_SET_POWERED handler: validate the request, short-circuit
 * no-op changes, and queue the actual power transition on the cmd
 * sync machinery.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be outstanding at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 1428
/* Broadcast a settings-changed event to all mgmt listeners. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
 1433
/* Context passed through mgmt_pending_foreach() callbacks that answer
 * pending commands and remember one socket for a follow-up event.
 */
struct cmd_lookup {
	struct sock *sk;	/* first answered socket; reference held */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
 1439
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings and free it. The first command's socket is stashed in the
 * cmd_lookup with an extra reference (sock_hold) so the caller can
 * skip it when broadcasting the resulting settings change; the caller
 * is responsible for the matching sock_put().
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
 1455
 1456static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
 1457{
 1458	u8 *status = data;
 1459
 1460	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
 1461	mgmt_pending_remove(cmd);
 1462}
 1463
 1464static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
 1465{
 1466	if (cmd->cmd_complete) {
 1467		u8 *status = data;
 1468
 1469		cmd->cmd_complete(cmd, *status);
 1470		mgmt_pending_remove(cmd);
 1471
 1472		return;
 1473	}
 1474
 1475	cmd_status_rsp(cmd, data);
 1476}
 1477
/* Generic cmd_complete handler: echo the command's own parameters
 * back as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
 1483
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
 1489
 1490static u8 mgmt_bredr_support(struct hci_dev *hdev)
 1491{
 1492	if (!lmp_bredr_capable(hdev))
 1493		return MGMT_STATUS_NOT_SUPPORTED;
 1494	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 1495		return MGMT_STATUS_REJECTED;
 1496	else
 1497		return MGMT_STATUS_SUCCESS;
 1498}
 1499
 1500static u8 mgmt_le_support(struct hci_dev *hdev)
 1501{
 1502	if (!lmp_le_capable(hdev))
 1503		return MGMT_STATUS_NOT_SUPPORTED;
 1504	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
 1505		return MGMT_STATUS_REJECTED;
 1506	else
 1507		return MGMT_STATUS_SUCCESS;
 1508}
 1509
/* Completion handler for Set Discoverable: on success, (re)arm the
 * discoverable timeout and confirm the new settings; on failure,
 * report the error and roll back the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* The timeout is armed here (not in set_discoverable) so it
	 * only starts counting once the controller actually became
	 * discoverable.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
 1543
/* hci_cmd_sync callback: push the discoverable state (scan mode /
 * advertising) to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
 1550
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited (limited requires a timeout). Validates the request,
 * handles the powered-off and timeout-only cases without any HCI
 * traffic, and otherwise updates the flags and queues the HCI update.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: just toggle the flag and report; the controller
	 * is updated when it powers on.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 1683
/* Completion handler for Set Connectable: report the error or confirm
 * the new settings to the command's socket and all listeners.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* NOTE(review): cmd was already dereferenced above, so this
	 * NULL check looks redundant — confirm before removing it.
	 */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
 1712
 1713static int set_connectable_update_settings(struct hci_dev *hdev,
 1714					   struct sock *sk, u8 val)
 1715{
 1716	bool changed = false;
 1717	int err;
 1718
 1719	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
 1720		changed = true;
 
 
 
 
 1721
 1722	if (val) {
 1723		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
 1724	} else {
 1725		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
 1726		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
 1727	}
 1728
 1729	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
 1730	if (err < 0)
 1731		return err;
 1732
 1733	if (changed) {
 1734		hci_update_scan(hdev);
 1735		hci_update_passive_scan(hdev);
 1736		return new_settings(hdev, sk);
 1737	}
 1738
 1739	return 0;
 1740}
 1741
/* hci_cmd_sync callback: push the connectable state to the
 * controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
 1748
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, handle the
 * powered-off case via a flag-only update, otherwise adjust the flags
 * and queue the HCI scan-mode update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flags need to change. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability,
		 * so disarm its timeout and clear both flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 1808
/* MGMT_OP_SET_BONDABLE handler: toggle the bondable flag and, when it
 * actually changed, refresh discoverability and broadcast the
 * settings.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear report whether the flag actually
	 * transitioned, avoiding spurious settings events.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 1846
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * authentication. Powered-off changes only touch the flag; otherwise
 * a Write Auth Enable HCI command is sent and completed elsewhere.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: record the desired state in the flag only. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
 1915
/* Completion handler for Set SSP: reconcile the SSP (and dependent
 * High Speed) flags with the command outcome, answer every pending
 * Set SSP command and broadcast the settings if they changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic enable done in
		 * set_ssp_sync(); HS depends on SSP and must go too.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP always disables HS as well; "changed"
		 * ends up true if either flag transitioned.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
 1964
 1965static int set_ssp_sync(struct hci_dev *hdev, void *data)
 1966{
 1967	struct mgmt_pending_cmd *cmd = data;
 1968	struct mgmt_mode *cp = cmd->param;
 1969	bool changed = false;
 1970	int err;
 1971
 1972	if (cp->val)
 1973		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
 1974
 1975	err = hci_write_ssp_mode_sync(hdev, cp->val);
 1976
 1977	if (!err && changed)
 1978		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
 1979
 1980	return err;
 1981}
 1982
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Powered-off changes only touch the flags (HS is cleared together
 * with SSP); otherwise the change is queued on the cmd sync
 * machinery and finished in set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, mirroring the logic in
	 * set_ssp_complete().
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
 2062
/* MGMT_OP_SET_HS handler: enable/disable High Speed (AMP) support.
 * Requires CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled.
 * Purely a flag change — no HCI traffic is involved.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled. */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change the SSP precondition. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 2123
/* Completion callback for set_le_sync: answer every pending SET_LE
 * command with either an error status or the updated settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	/* settings_rsp records a requesting socket in match.sk so that
	 * socket can be skipped when broadcasting New Settings below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
 2144
/* hci_cmd_sync work for SET_LE: program the LE Host Supported setting
 * on the controller and refresh advertising state accordingly.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Tear down all advertising before switching LE off */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Scan response data can only be set once the
			 * extended instance exists.
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
 2188
/* Completion callback for set_mesh_sync: report the error to every
 * pending SET_MESH_RECEIVER command, or complete this one successfully.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
 2204
/* hci_cmd_sync work for SET_MESH_RECEIVER: store the AD type filter
 * list, toggle the HCI_MESH flag and re-program passive scanning.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes after the fixed header carry the AD type filter list */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
 2227
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync to apply it.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh is experimental and LE-only */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
 2265
/* Completion of mesh_send_sync: on failure finish the transmission with
 * an error; on success schedule the mesh_send_done work after the
 * transmit window (cnt * 25 ms).
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
 2286
/* hci_cmd_sync work for MESH_SEND: wrap the mesh packet into a
 * dedicated advertising instance (one past the controller's regular
 * adv set count) and schedule it when appropriate.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Instance number reserved for mesh transmissions */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means we decided to advertise it right now */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
 2340
 2341static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
 2342{
 2343	struct mgmt_rp_mesh_read_features *rp = data;
 2344
 2345	if (rp->used_handles >= rp->max_handles)
 2346		return;
 2347
 2348	rp->handles[rp->used_handles++] = mesh_tx->handle;
 2349}
 2350
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions plus the handles currently owned by the
 * requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* send_count fills rp.handles with this socket's active handles */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the number of handles actually in use */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
 2377
 2378static int send_cancel(struct hci_dev *hdev, void *data)
 2379{
 2380	struct mgmt_pending_cmd *cmd = data;
 2381	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
 2382	struct mgmt_mesh_tx *mesh_tx;
 2383
 2384	if (!cancel->handle) {
 2385		do {
 2386			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
 2387
 2388			if (mesh_tx)
 2389				mesh_send_complete(hdev, mesh_tx, false);
 2390		} while (mesh_tx);
 2391	} else {
 2392		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
 2393
 2394		if (mesh_tx && mesh_tx->sk == cmd->sk)
 2395			mesh_send_complete(hdev, mesh_tx, false);
 2396	}
 2397
 2398	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
 2399			  0, NULL, 0);
 2400	mgmt_pending_free(cmd);
 2401
 2402	return 0;
 2403}
 2404
/* MGMT_OP_MESH_SEND_CANCEL handler: queue send_cancel to abort pending
 * mesh transmissions.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new (not _add): send_cancel replies and frees cmd */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
 2438
 2439static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 2440{
 2441	struct mgmt_mesh_tx *mesh_tx;
 2442	struct mgmt_cp_mesh_send *send = data;
 2443	struct mgmt_rp_mesh_read_features rp;
 2444	bool sending;
 2445	int err = 0;
 2446
 2447	if (!lmp_le_capable(hdev) ||
 2448	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
 2449		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
 2450				       MGMT_STATUS_NOT_SUPPORTED);
 2451	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
 2452	    len <= MGMT_MESH_SEND_SIZE ||
 2453	    len > (MGMT_MESH_SEND_SIZE + 31))
 2454		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
 2455				       MGMT_STATUS_REJECTED);
 2456
 2457	hci_dev_lock(hdev);
 2458
 2459	memset(&rp, 0, sizeof(rp));
 2460	rp.max_handles = MESH_HANDLES_MAX;
 2461
 2462	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
 2463
 2464	if (rp.max_handles <= rp.used_handles) {
 2465		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
 2466				      MGMT_STATUS_BUSY);
 2467		goto done;
 2468	}
 2469
 2470	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
 2471	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
 2472
 2473	if (!mesh_tx)
 2474		err = -ENOMEM;
 2475	else if (!sending)
 2476		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
 2477					 mesh_send_start_complete);
 2478
 2479	if (err < 0) {
 2480		bt_dev_err(hdev, "Send Mesh Failed %d", err);
 2481		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
 2482				      MGMT_STATUS_FAILED);
 2483
 2484		if (mesh_tx) {
 2485			if (sending)
 2486				mgmt_mesh_remove(mesh_tx);
 2487		}
 2488	} else {
 2489		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
 2490
 2491		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
 2492				  &mesh_tx->handle, 1);
 2493	}
 2494
 2495done:
 2496	hci_dev_unlock(hdev);
 2497	return err;
 2498}
 2499
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support. */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* While powered off (or when the host state already matches) just
	 * update the flags; no HCI traffic is needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also touches the LE controller state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 2588
 2589/* This is a helper function to test for pending mgmt commands that can
 2590 * cause CoD or EIR HCI commands. We can only allow one such pending
 2591 * mgmt command at a time since otherwise we cannot easily track what
 2592 * the current values are, will be, and based on that calculate if a new
 2593 * HCI command needs to be sent and if yes with what value.
 2594 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* These opcodes may modify the Class of Device and/or EIR data */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
 2611
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; shortened 16/32-bit UUIDs occupy bytes
 * 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
 2616
 2617static u8 get_uuid_size(const u8 *uuid)
 2618{
 2619	u32 val;
 2620
 2621	if (memcmp(uuid, bluetooth_base_uuid, 12))
 2622		return 128;
 2623
 2624	val = get_unaligned_le32(&uuid[12]);
 2625	if (val > 0xffff)
 2626		return 32;
 2627
 2628	return 16;
 2629}
 2630
 2631static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
 2632{
 2633	struct mgmt_pending_cmd *cmd = data;
 2634
 2635	bt_dev_dbg(hdev, "err %d", err);
 2636
 2637	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
 2638			  mgmt_status(err), hdev->dev_class, 3);
 2639
 2640	mgmt_pending_free(cmd);
 2641}
 2642
 2643static int add_uuid_sync(struct hci_dev *hdev, void *data)
 2644{
 2645	int err;
 2646
 2647	err = hci_update_class_sync(hdev);
 2648	if (err)
 2649		return err;
 2650
 2651	return hci_update_eir_sync(hdev);
 2652}
 2653
/* MGMT_OP_ADD_UUID handler: register a service UUID and queue a sync of
 * the Class of Device and EIR data to the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* mgmt_class_complete sends the command response */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
 2699
 2700static bool enable_service_cache(struct hci_dev *hdev)
 2701{
 2702	if (!hdev_is_powered(hdev))
 2703		return false;
 2704
 2705	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
 2706		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
 2707				   CACHE_TIMEOUT);
 2708		return true;
 2709	}
 2710
 2711	return false;
 2712}
 2713
 2714static int remove_uuid_sync(struct hci_dev *hdev, void *data)
 2715{
 2716	int err;
 2717
 2718	err = hci_update_class_sync(hdev);
 2719	if (err)
 2720		return err;
 2721
 2722	return hci_update_eir_sync(hdev);
 2723}
 2724
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID — or all of them
 * when the all-zero wildcard UUID is given — and resync class/EIR.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard: the all-zero UUID requests removal of every UUID */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed the controller update is
		 * deferred; reply with the current class right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* mgmt_class_complete sends the command response */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 2792
 2793static int set_class_sync(struct hci_dev *hdev, void *data)
 2794{
 2795	int err = 0;
 2796
 2797	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
 2798		cancel_delayed_work_sync(&hdev->service_cache);
 2799		err = hci_update_eir_sync(hdev);
 
 2800	}
 2801
 2802	if (err)
 2803		return err;
 
 2804
 2805	return hci_update_class_sync(hdev);
 
 
 2806}
 2807
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class and,
 * while powered, queue the Class of Device update to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and the high three bits of major are
	 * not valid class bits and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just store the values and reply with the current
	 * class; no controller update is queued.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* mgmt_class_complete sends the command response */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 2859
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by user space.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap so that struct_size(cp, keys, key_count) stays within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type before touching the stored keys so the
	 * operation is all-or-nothing.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the administrative block list are never loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
 2949
 2950static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
 2951			   u8 addr_type, struct sock *skip_sk)
 2952{
 2953	struct mgmt_ev_device_unpaired ev;
 2954
 2955	bacpy(&ev.addr.bdaddr, bdaddr);
 2956	ev.addr.type = addr_type;
 2957
 2958	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
 2959			  skip_sk);
 2960}
 2961
 2962static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
 2963{
 2964	struct mgmt_pending_cmd *cmd = data;
 2965	struct mgmt_cp_unpair_device *cp = cmd->param;
 2966
 2967	if (!err)
 2968		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
 2969
 2970	cmd->cmd_complete(cmd, err);
 2971	mgmt_pending_free(cmd);
 2972}
 2973
 2974static int unpair_device_sync(struct hci_dev *hdev, void *data)
 2975{
 2976	struct mgmt_pending_cmd *cmd = data;
 2977	struct mgmt_cp_unpair_device *cp = cmd->param;
 2978	struct hci_conn *conn;
 2979
 2980	if (cp->addr.type == BDADDR_BREDR)
 2981		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
 2982					       &cp->addr.bdaddr);
 2983	else
 2984		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
 2985					       le_addr_type(cp->addr.type));
 2986
 2987	if (!conn)
 2988		return 0;
 2989
 2990	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
 2991}
 2992
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing keys for a device
 * and optionally terminate any existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 3121
/* MGMT_OP_DISCONNECT handler: terminate an existing BR/EDR or LE
 * connection to the given address.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 3187
 3188static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
 3189{
 3190	switch (link_type) {
 3191	case LE_LINK:
 3192		switch (addr_type) {
 3193		case ADDR_LE_DEV_PUBLIC:
 3194			return BDADDR_LE_PUBLIC;
 3195
 3196		default:
 3197			/* Fallback to LE Random address type */
 3198			return BDADDR_LE_RANDOM;
 3199		}
 3200
 3201	default:
 3202		/* Fallback to BR/EDR type */
 3203		return BDADDR_BREDR;
 3204	}
 3205}
 3206
/* MGMT_OP_GET_CONNECTIONS handler: reply with the addresses of all
 * connections that have been reported to the management interface.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidate entries to size the reply buffer. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses.  SCO/eSCO links are skipped
	 * without advancing the index, so a slot written for them is simply
	 * overwritten by the next accepted connection (or truncated away by
	 * the recalculated length below).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 3260
 3261static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
 3262				   struct mgmt_cp_pin_code_neg_reply *cp)
 3263{
 3264	struct mgmt_pending_cmd *cmd;
 3265	int err;
 3266
 3267	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
 3268			       sizeof(*cp));
 3269	if (!cmd)
 3270		return -ENOMEM;
 3271
 3272	cmd->cmd_complete = addr_cmd_complete;
 3273
 3274	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
 3275			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
 3276	if (err < 0)
 3277		mgmt_pending_remove(cmd);
 3278
 3279	return err;
 3280}
 3281
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code for
 * an ongoing BR/EDR legacy pairing to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * rejected towards the controller with a negative reply and towards
	 * user space with INVALID_PARAMS.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 3343
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 3344static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
 3345			     u16 len)
 3346{
 3347	struct mgmt_cp_set_io_capability *cp = data;
 3348
 3349	bt_dev_dbg(hdev, "sock %p", sk);
 3350
 3351	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
 3352		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
 3353				       MGMT_STATUS_INVALID_PARAMS);
 3354
 3355	hci_dev_lock(hdev);
 3356
 3357	hdev->io_capability = cp->io_capability;
 3358
 3359	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
 
 3360
 3361	hci_dev_unlock(hdev);
 3362
 3363	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
 3364				 NULL, 0);
 3365}
 3366
 3367static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
 3368{
 3369	struct hci_dev *hdev = conn->hdev;
 3370	struct mgmt_pending_cmd *cmd;
 3371
 3372	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
 3373		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
 3374			continue;
 3375
 3376		if (cmd->user_data != conn)
 3377			continue;
 3378
 3379		return cmd;
 3380	}
 3381
 3382	return NULL;
 3383}
 3384
/* Complete a pending MGMT_OP_PAIR_DEVICE command with the given mgmt
 * status, detach the pairing callbacks from the connection and release
 * the references taken when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Release the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
 3413
 3414void mgmt_smp_complete(struct hci_conn *conn, bool complete)
 3415{
 3416	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
 3417	struct mgmt_pending_cmd *cmd;
 3418
 3419	cmd = find_pairing(conn);
 3420	if (cmd) {
 3421		cmd->cmd_complete(cmd, status);
 3422		mgmt_pending_remove(cmd);
 3423	}
 3424}
 3425
 3426static void pairing_complete_cb(struct hci_conn *conn, u8 status)
 3427{
 3428	struct mgmt_pending_cmd *cmd;
 3429
 3430	BT_DBG("status %u", status);
 3431
 3432	cmd = find_pairing(conn);
 3433	if (!cmd) {
 3434		BT_DBG("Unable to find a pending command");
 3435		return;
 3436	}
 3437
 3438	cmd->cmd_complete(cmd, mgmt_status(status));
 3439	mgmt_pending_remove(cmd);
 3440}
 3441
 3442static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
 3443{
 3444	struct mgmt_pending_cmd *cmd;
 3445
 3446	BT_DBG("status %u", status);
 3447
 3448	if (!status)
 3449		return;
 3450
 3451	cmd = find_pairing(conn);
 3452	if (!cmd) {
 3453		BT_DBG("Unable to find a pending command");
 3454		return;
 3455	}
 3456
 3457	cmd->cmd_complete(cmd, mgmt_status(status));
 3458	mgmt_pending_remove(cmd);
 3459}
 3460
 3461static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 3462		       u16 len)
 3463{
 3464	struct mgmt_cp_pair_device *cp = data;
 3465	struct mgmt_rp_pair_device rp;
 3466	struct mgmt_pending_cmd *cmd;
 3467	u8 sec_level, auth_type;
 3468	struct hci_conn *conn;
 3469	int err;
 3470
 3471	bt_dev_dbg(hdev, "sock %p", sk);
 3472
 3473	memset(&rp, 0, sizeof(rp));
 3474	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
 3475	rp.addr.type = cp->addr.type;
 3476
 3477	if (!bdaddr_type_is_valid(cp->addr.type))
 3478		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
 3479					 MGMT_STATUS_INVALID_PARAMS,
 3480					 &rp, sizeof(rp));
 3481
 3482	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
 3483		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
 3484					 MGMT_STATUS_INVALID_PARAMS,
 3485					 &rp, sizeof(rp));
 3486
 3487	hci_dev_lock(hdev);
 3488
 3489	if (!hdev_is_powered(hdev)) {
 3490		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
 3491					MGMT_STATUS_NOT_POWERED, &rp,
 3492					sizeof(rp));
 3493		goto unlock;
 3494	}
 3495
 3496	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
 3497		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
 3498					MGMT_STATUS_ALREADY_PAIRED, &rp,
 3499					sizeof(rp));
 3500		goto unlock;
 3501	}
 3502
 3503	sec_level = BT_SECURITY_MEDIUM;
 3504	auth_type = HCI_AT_DEDICATED_BONDING;
 
 
 
 3505
 3506	if (cp->addr.type == BDADDR_BREDR) {
 3507		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
 3508				       auth_type, CONN_REASON_PAIR_DEVICE);
 3509	} else {
 3510		u8 addr_type = le_addr_type(cp->addr.type);
 3511		struct hci_conn_params *p;
 3512
 3513		/* When pairing a new device, it is expected to remember
 3514		 * this device for future connections. Adding the connection
 3515		 * parameter information ahead of time allows tracking
 3516		 * of the peripheral preferred values and will speed up any
 3517		 * further connection establishment.
 3518		 *
 3519		 * If connection parameters already exist, then they
 3520		 * will be kept and this function does nothing.
 3521		 */
 3522		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
 3523
 3524		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
 3525			p->auto_connect = HCI_AUTO_CONN_DISABLED;
 3526
 3527		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
 3528					   sec_level, HCI_LE_CONN_TIMEOUT,
 3529					   CONN_REASON_PAIR_DEVICE);
 3530	}
 3531
 3532	if (IS_ERR(conn)) {
 3533		int status;
 3534
 3535		if (PTR_ERR(conn) == -EBUSY)
 3536			status = MGMT_STATUS_BUSY;
 3537		else if (PTR_ERR(conn) == -EOPNOTSUPP)
 3538			status = MGMT_STATUS_NOT_SUPPORTED;
 3539		else if (PTR_ERR(conn) == -ECONNREFUSED)
 3540			status = MGMT_STATUS_REJECTED;
 3541		else
 3542			status = MGMT_STATUS_CONNECT_FAILED;
 3543
 3544		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
 3545					status, &rp, sizeof(rp));
 3546		goto unlock;
 3547	}
 3548
 3549	if (conn->connect_cfm_cb) {
 3550		hci_conn_drop(conn);
 3551		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
 3552					MGMT_STATUS_BUSY, &rp, sizeof(rp));
 3553		goto unlock;
 3554	}
 3555
 3556	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
 3557	if (!cmd) {
 3558		err = -ENOMEM;
 3559		hci_conn_drop(conn);
 3560		goto unlock;
 3561	}
 3562
 3563	cmd->cmd_complete = pairing_complete;
 3564
 3565	/* For LE, just connecting isn't a proof that the pairing finished */
 3566	if (cp->addr.type == BDADDR_BREDR) {
 3567		conn->connect_cfm_cb = pairing_complete_cb;
 3568		conn->security_cfm_cb = pairing_complete_cb;
 3569		conn->disconn_cfm_cb = pairing_complete_cb;
 3570	} else {
 3571		conn->connect_cfm_cb = le_pairing_complete_cb;
 3572		conn->security_cfm_cb = le_pairing_complete_cb;
 3573		conn->disconn_cfm_cb = le_pairing_complete_cb;
 3574	}
 3575
 
 
 3576	conn->io_capability = cp->io_cap;
 3577	cmd->user_data = hci_conn_get(conn);
 3578
 3579	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
 3580	    hci_conn_security(conn, sec_level, auth_type, true)) {
 3581		cmd->cmd_complete(cmd, 0);
 3582		mgmt_pending_remove(cmd);
 3583	}
 3584
 3585	err = 0;
 3586
 3587unlock:
 3588	hci_dev_unlock(hdev);
 3589	return err;
 3590}
 3591
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for the given address, remove any keys created so far and
 * tear down a link that only exists because of the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same address the pairing was for. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the Pair Device command with CANCELLED; pairing_complete()
	 * drops the references it held on conn.  NOTE(review): conn is still
	 * dereferenced below — this relies on the connection object staying
	 * valid past that put; verify against hci_conn lifetime rules.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 3648
/* Common backend for the user pairing response commands (PIN code
 * negative reply, user confirm reply/neg reply, passkey reply/neg
 * reply).
 *
 * LE responses are routed to SMP and answered immediately; BR/EDR
 * responses are sent to the controller as the given HCI command, with
 * the mgmt command completing asynchronously via addr_cmd_complete.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
 3719
 3720static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
 3721			      void *data, u16 len)
 3722{
 3723	struct mgmt_cp_pin_code_neg_reply *cp = data;
 3724
 3725	bt_dev_dbg(hdev, "sock %p", sk);
 3726
 3727	return user_pairing_resp(sk, hdev, &cp->addr,
 3728				MGMT_OP_PIN_CODE_NEG_REPLY,
 3729				HCI_OP_PIN_CODE_NEG_REPLY, 0);
 3730}
 3731
 3732static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
 3733			      u16 len)
 3734{
 3735	struct mgmt_cp_user_confirm_reply *cp = data;
 3736
 3737	bt_dev_dbg(hdev, "sock %p", sk);
 3738
 3739	if (len != sizeof(*cp))
 3740		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
 3741				       MGMT_STATUS_INVALID_PARAMS);
 3742
 3743	return user_pairing_resp(sk, hdev, &cp->addr,
 3744				 MGMT_OP_USER_CONFIRM_REPLY,
 3745				 HCI_OP_USER_CONFIRM_REPLY, 0);
 3746}
 3747
 3748static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
 3749				  void *data, u16 len)
 3750{
 3751	struct mgmt_cp_user_confirm_neg_reply *cp = data;
 3752
 3753	bt_dev_dbg(hdev, "sock %p", sk);
 3754
 3755	return user_pairing_resp(sk, hdev, &cp->addr,
 3756				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
 3757				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
 3758}
 3759
 3760static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
 3761			      u16 len)
 3762{
 3763	struct mgmt_cp_user_passkey_reply *cp = data;
 3764
 3765	bt_dev_dbg(hdev, "sock %p", sk);
 3766
 3767	return user_pairing_resp(sk, hdev, &cp->addr,
 3768				 MGMT_OP_USER_PASSKEY_REPLY,
 3769				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
 3770}
 3771
 3772static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
 3773				  void *data, u16 len)
 3774{
 3775	struct mgmt_cp_user_passkey_neg_reply *cp = data;
 3776
 3777	bt_dev_dbg(hdev, "sock %p", sk);
 3778
 3779	return user_pairing_resp(sk, hdev, &cp->addr,
 3780				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
 3781				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
 3782}
 3783
 3784static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
 3785{
 3786	struct adv_info *adv_instance;
 3787
 3788	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
 3789	if (!adv_instance)
 3790		return 0;
 3791
 3792	/* stop if current instance doesn't need to be changed */
 3793	if (!(adv_instance->flags & flags))
 3794		return 0;
 3795
 3796	cancel_adv_timeout(hdev);
 3797
 3798	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
 3799	if (!adv_instance)
 3800		return 0;
 3801
 3802	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
 3803
 3804	return 0;
 3805}
 3806
/* hci_cmd_sync work: rotate past the current advertising instance if it
 * includes the local name, so the name change takes effect in advertising.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
 3811
/* Completion callback for set_name_sync: report the Set Local Name
 * result to user space and, while LE advertising is active, refresh the
 * advertising instance that carries the name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Ignore stale completions for commands no longer pending. */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
 3836
/* hci_cmd_sync work: push the local name out to the controller — device
 * name and EIR for BR/EDR, scan response data for LE when advertising.
 * Always returns 0.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
 3852
/* MGMT_OP_SET_LOCAL_NAME handler: update the controller's local name and
 * short name.  When powered, the change is applied asynchronously via
 * set_name_sync; when unpowered, only the stored values are updated and
 * a name-changed event is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right away. */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only store the name once the update was successfully queued, so a
	 * failure above leaves the old name in place.
	 */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
 3915
/* hci_cmd_sync work: rotate past the current advertising instance if it
 * includes the appearance value, so the changed appearance takes effect.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
 3920
 3921static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
 3922			  u16 len)
 3923{
 3924	struct mgmt_cp_set_appearance *cp = data;
 3925	u16 appearance;
 3926	int err;
 3927
 3928	bt_dev_dbg(hdev, "sock %p", sk);
 3929
 3930	if (!lmp_le_capable(hdev))
 3931		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
 3932				       MGMT_STATUS_NOT_SUPPORTED);
 3933
 3934	appearance = le16_to_cpu(cp->appearance);
 3935
 3936	hci_dev_lock(hdev);
 3937
 3938	if (hdev->appearance != appearance) {
 3939		hdev->appearance = appearance;
 3940
 3941		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
 3942			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
 3943					   NULL);
 3944
 3945		ext_info_changed(hdev, sk);
 3946	}
 3947
 3948	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
 3949				0);
 3950
 3951	hci_dev_unlock(hdev);
 3952
 3953	return err;
 3954}
 3955
 3956static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
 3957				 void *data, u16 len)
 3958{
 3959	struct mgmt_rp_get_phy_configuration rp;
 3960
 3961	bt_dev_dbg(hdev, "sock %p", sk);
 3962
 3963	hci_dev_lock(hdev);
 3964
 3965	memset(&rp, 0, sizeof(rp));
 3966
 3967	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
 3968	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
 3969	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
 3970
 3971	hci_dev_unlock(hdev);
 3972
 3973	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
 3974				 &rp, sizeof(rp));
 3975}
 3976
 3977int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
 3978{
 3979	struct mgmt_ev_phy_configuration_changed ev;
 3980
 3981	memset(&ev, 0, sizeof(ev));
 3982
 3983	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
 3984
 3985	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
 3986			  sizeof(ev), skip);
 3987}
 3988
/* Completion callback for set_default_phy_sync: translate the HCI
 * response stored in cmd->skb into a mgmt status, answer user space and
 * broadcast the PHY-changed event on success.  Frees the response skb.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Ignore stale completions for commands no longer pending. */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First response byte is the HCI status code. */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
 4025
 4026static int set_default_phy_sync(struct hci_dev *hdev, void *data)
 4027{
 4028	struct mgmt_pending_cmd *cmd = data;
 4029	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
 4030	struct hci_cp_le_set_default_phy cp_phy;
 4031	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
 4032
 4033	memset(&cp_phy, 0, sizeof(cp_phy));
 4034
 4035	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
 4036		cp_phy.all_phys |= 0x01;
 4037
 4038	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
 4039		cp_phy.all_phys |= 0x02;
 4040
 4041	if (selected_phys & MGMT_PHY_LE_1M_TX)
 4042		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
 4043
 4044	if (selected_phys & MGMT_PHY_LE_2M_TX)
 4045		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
 4046
 4047	if (selected_phys & MGMT_PHY_LE_CODED_TX)
 4048		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
 4049
 4050	if (selected_phys & MGMT_PHY_LE_1M_RX)
 4051		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
 4052
 4053	if (selected_phys & MGMT_PHY_LE_2M_RX)
 4054		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
 4055
 4056	if (selected_phys & MGMT_PHY_LE_CODED_RX)
 4057		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
 4058
 4059	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
 4060				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
 4061
 4062	return 0;
 4063}
 4064
/* MGMT_OP_SET_PHY_CONFIGURATION handler: select the BR/EDR packet types
 * and LE PHYs to use.  BR/EDR selections are applied locally through
 * hdev->pkt_type; LE selections are pushed to the controller via the
 * HCI LE Set Default PHY command on the cmd_sync queue.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Supported but non-configurable PHYs must always stay selected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR PHY selections onto ACL packet types.  Basic-rate
	 * multi-slot bits are opt-in, while the EDR bits use inverted
	 * logic: setting HCI_2DHx/HCI_3DHx disables that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only the BR/EDR packet types
	 * needed updating — complete immediately without touching HCI.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
 4193
 4194static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 4195			    u16 len)
 4196{
 4197	int err = MGMT_STATUS_SUCCESS;
 4198	struct mgmt_cp_set_blocked_keys *keys = data;
 4199	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
 4200				   sizeof(struct mgmt_blocked_key_info));
 4201	u16 key_count, expected_len;
 4202	int i;
 4203
 4204	bt_dev_dbg(hdev, "sock %p", sk);
 4205
 4206	key_count = __le16_to_cpu(keys->key_count);
 4207	if (key_count > max_key_count) {
 4208		bt_dev_err(hdev, "too big key_count value %u", key_count);
 4209		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
 4210				       MGMT_STATUS_INVALID_PARAMS);
 4211	}
 4212
 4213	expected_len = struct_size(keys, keys, key_count);
 4214	if (expected_len != len) {
 4215		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
 4216			   expected_len, len);
 4217		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
 4218				       MGMT_STATUS_INVALID_PARAMS);
 4219	}
 4220
 4221	hci_dev_lock(hdev);
 4222
 4223	hci_blocked_keys_clear(hdev);
 4224
 4225	for (i = 0; i < key_count; ++i) {
 4226		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
 4227
 4228		if (!b) {
 4229			err = MGMT_STATUS_NO_RESOURCES;
 4230			break;
 4231		}
 4232
 4233		b->type = keys->keys[i].type;
 4234		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
 4235		list_add_rcu(&b->list, &hdev->blocked_keys);
 4236	}
 4237	hci_dev_unlock(hdev);
 4238
 4239	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
 4240				err, NULL, 0);
 4241}
 4242
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech
 * setting.  Only available when the driver declares the corresponding
 * quirk, and the value cannot be changed while the controller is
 * powered.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Actually flipping the setting while powered is rejected;
	 * re-submitting the current value is allowed.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test_and_set/clear report whether the flag actually flipped, so
	 * new_settings is only emitted on a real change.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 4291
/* MGMT_OP_READ_CONTROLLER_CAP handler: report security-related
 * controller capabilities as a list of EIR-style (len, type, value)
 * entries appended to rp->cap.
 *
 * NOTE(review): buf[20] must stay large enough for sizeof(*rp) plus
 * every entry appended below — keep it in sync when adding entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Each value is copied as a single byte (presumably s8
		 * dBm values — confirm against hci_dev definition).
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
 4358
/* UUIDs identifying the experimental features exposed through the
 * Read/Set Experimental Feature mgmt commands.  Each array stores the
 * UUID bytes in reversed (little-endian) order relative to the string
 * form given in the comment above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
 4402
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental
 * features applicable to the given index (hdev == NULL means the
 * non-controller index) together with their flags.  BIT(0) in a
 * feature's flags means "currently enabled"; the RPA resolution entry
 * additionally always carries BIT(1) (see set_rpa_resolution_func).
 *
 * Each entry is 20 bytes (16-byte UUID + 32-bit flags), which is what
 * the "20 * idx" in the response length accounts for.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features — must match the number of
	 * conditional blocks below.
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report is available either via the AOSP extension or a
	 * driver-provided set_quality_report hook.
	 */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* ISO socket support is global, reported on every index */
	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
 4507
/* Emit an Experimental Feature Changed event for the RPA resolution
 * (LL privacy) feature.  As a side effect this also updates
 * hdev->conn_flags: HCI_CONN_FLAG_DEVICE_PRIVACY becomes advertised as
 * supported only while the feature is enabled on a privacy-mode
 * capable controller.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	/* BIT(1) is always set for this feature, matching the flags
	 * reported by read_exp_features_info().
	 */
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	// Do we need to be atomic with the conn_flags?
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
 4528
/* Emit an Experimental Feature Changed event for the given feature
 * UUID to all sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS,
 * except 'skip' (typically the socket that issued the change).
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
 4542
/* Initializer for one entry of the exp_features[] dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
 4548
/* The zero key uuid is special. Multiple exp features are set through it.
 * Sending the all-zero UUID disables: the debug feature (when addressed
 * to the non-controller index) and LL privacy (when the controller is
 * powered off).  The response always reports a zero UUID and zero flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	/* Opt the caller into future feature-changed events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
 4585
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set-handler for the debug experimental feature: toggles the global
 * Bluetooth debug setting via bt_dbg_set().  Only valid on the
 * non-controller index with a single on/off parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* 'changed' is true when the new value differs from the old one */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is guaranteed NULL here, so the event targets the
	 * non-controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
 4632
/* Set-handler for the mesh experimental feature: toggles
 * HCI_MESH_EXPERIMENTAL on the given controller.  Disabling the
 * experimental flag also clears the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental feature off also disables
		 * any currently active mesh mode.
		 */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
 4683
/* Set-handler for the RPA resolution (LL privacy) experimental
 * feature.  Only allowed while the controller is powered off; enabling
 * it also clears HCI_ADVERTISING.  The reported flags carry BIT(0) for
 * "enabled" and always BIT(1), per the inline comments below.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* exp_ll_privacy_feature_changed also updates hdev->conn_flags */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
 4748
/* Set-handler for the quality report experimental feature.  The
 * driver-provided set_quality_report hook takes precedence; otherwise
 * the AOSP vendor extension is used.  Runs under hci_req_sync_lock()
 * since toggling involves sending HCI traffic to the controller.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag after the controller accepted
		 * the change.
		 */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
 4822
/* Set-handler for the offload codecs experimental feature: toggles
 * HCI_OFFLOAD_CODECS_ENABLED.  Requires the driver to provide a
 * get_data_path_id hook, which indicates offload codec support.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
 4880
/* Set-handler for the LE simultaneous roles experimental feature:
 * toggles HCI_LE_SIMULTANEOUS_ROLES on controllers whose LE states
 * allow acting as central and peripheral at the same time.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
 4938
#ifdef CONFIG_BT_LE
/* Set-handler for the ISO socket experimental feature: registers
 * (iso_init) or unregisters (iso_exit) the ISO socket protocol.
 * Only valid on the non-controller index; the feature-changed event is
 * emitted only when the init/exit call actually succeeded.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Note: 'err' from iso_init/iso_exit is intentionally replaced;
	 * the command itself completes with status 0 either way.
	 */
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
 4989
/* Dispatch table mapping experimental-feature UUIDs to their set
 * handlers; terminated by an entry with a NULL uuid.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
 5011
 5012static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
 5013			   void *data, u16 data_len)
 5014{
 5015	struct mgmt_cp_set_exp_feature *cp = data;
 5016	size_t i = 0;
 5017
 5018	bt_dev_dbg(hdev, "sock %p", sk);
 5019
 5020	for (i = 0; exp_features[i].uuid; i++) {
 5021		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
 5022			return exp_features[i].set_func(sk, hdev, cp, data_len);
 5023	}
 5024
 5025	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
 5026			       MGMT_OP_SET_EXP_FEATURE,
 5027			       MGMT_STATUS_NOT_SUPPORTED);
 5028}
 5029
 5030static u32 get_params_flags(struct hci_dev *hdev,
 5031			    struct hci_conn_params *params)
 5032{
 5033	u32 flags = hdev->conn_flags;
 5034
 5035	/* Devices using RPAs can only be programmed in the acceptlist if
 5036	 * LL Privacy has been enable otherwise they cannot mark
 5037	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
 5038	 */
 5039	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
 5040	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
 5041		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
 5042
 5043	return flags;
 5044}
 5045
/* MGMT_OP_GET_DEVICE_FLAGS handler: report the supported and current
 * connection flags for a device on the BR/EDR accept list or in the
 * LE connection parameters.  'status' stays INVALID_PARAMS unless the
 * device lookup succeeds.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE devices may support fewer flags than the device-wide
		 * set, e.g. when an RPA prevents remote wakeup.
		 */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
 5097
/* Emit a Device Flags Changed event to all mgmt sockets except 'sk'
 * (the socket that issued the change).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
 5111
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the connection flags of a
 * device on the BR/EDR accept list or in the LE connection parameters.
 * On success a Device Flags Changed event is broadcast to other
 * sockets.  Note the label topology: an early validation failure jumps
 * to 'done' without ever taking hci_dev_lock(), while later failures
 * go through 'unlock'.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	// We should take hci_dev_lock() early, I think.. conn_flags can change
	supported_flags = hdev->conn_flags;

	/* Reject any flag the controller does not support */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-check against the per-device supported set, which may be
	 * narrower than hdev->conn_flags (see get_params_flags()).
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
 5188
 5189static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
 5190				   u16 handle)
 5191{
 5192	struct mgmt_ev_adv_monitor_added ev;
 5193
 5194	ev.monitor_handle = cpu_to_le16(handle);
 5195
 5196	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
 5197}
 5198
/* Broadcast an Advertisement Monitor Removed event for 'handle'.
 * If a Remove Adv Monitor command is pending with a non-zero monitor
 * handle, its issuing socket is skipped — it gets notified via the
 * command response instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle means "remove all monitors" — in that
		 * case the issuing socket is not skipped.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
 5218
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features plus the handles of all registered monitors.
 *
 * Handles are copied into a fixed on-stack array under hci_dev_lock so
 * the response can be allocated and built without holding the lock.
 * NOTE(review): this assumes adv_monitors_idr never holds more than
 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries — confirm registration
 * enforces that limit.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
 5267
/* hci_cmd_sync completion for Add Adv Patterns Monitor (RSSI): replies to
 * the issuing socket with the assigned monitor handle and, on success,
 * accounts the new monitor and refreshes passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Broadcast the event to the other sockets before sending
		 * the command reply below.
		 */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
 5295
 5296static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
 5297{
 5298	struct mgmt_pending_cmd *cmd = data;
 5299	struct adv_monitor *monitor = cmd->user_data;
 5300
 5301	return hci_add_adv_monitor(hdev, monitor);
 5302}
 5303
 5304static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
 5305				      struct adv_monitor *m, u8 status,
 5306				      void *data, u16 len, u16 op)
 5307{
 5308	struct mgmt_pending_cmd *cmd;
 5309	int err;
 5310
 5311	hci_dev_lock(hdev);
 5312
 5313	if (status)
 5314		goto unlock;
 5315
 5316	if (pending_find(MGMT_OP_SET_LE, hdev) ||
 5317	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
 5318	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
 5319	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
 5320		status = MGMT_STATUS_BUSY;
 5321		goto unlock;
 5322	}
 5323
 5324	cmd = mgmt_pending_add(sk, op, hdev, data, len);
 5325	if (!cmd) {
 5326		status = MGMT_STATUS_NO_RESOURCES;
 5327		goto unlock;
 5328	}
 5329
 5330	cmd->user_data = m;
 5331	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
 5332				 mgmt_add_adv_patterns_monitor_complete);
 5333	if (err) {
 5334		if (err == -ENOMEM)
 5335			status = MGMT_STATUS_NO_RESOURCES;
 5336		else
 5337			status = MGMT_STATUS_FAILED;
 5338
 5339		goto unlock;
 5340	}
 5341
 5342	hci_dev_unlock(hdev);
 5343
 5344	return 0;
 5345
 5346unlock:
 5347	hci_free_adv_monitor(hdev, m);
 5348	hci_dev_unlock(hdev);
 5349	return mgmt_cmd_status(sk, hdev->id, op, status);
 5350}
 5351
 5352static void parse_adv_monitor_rssi(struct adv_monitor *m,
 5353				   struct mgmt_adv_rssi_thresholds *rssi)
 5354{
 5355	if (rssi) {
 5356		m->rssi.low_threshold = rssi->low_threshold;
 5357		m->rssi.low_threshold_timeout =
 5358		    __le16_to_cpu(rssi->low_threshold_timeout);
 5359		m->rssi.high_threshold = rssi->high_threshold;
 5360		m->rssi.high_threshold_timeout =
 5361		    __le16_to_cpu(rssi->high_threshold_timeout);
 5362		m->rssi.sampling_period = rssi->sampling_period;
 5363	} else {
 5364		/* Default values. These numbers are the least constricting
 5365		 * parameters for MSFT API to work, so it behaves as if there
 5366		 * are no rssi parameter to consider. May need to be changed
 5367		 * if other API are to be supported.
 5368		 */
 5369		m->rssi.low_threshold = -127;
 5370		m->rssi.low_threshold_timeout = 60;
 5371		m->rssi.high_threshold = -127;
 5372		m->rssi.high_threshold_timeout = 0;
 5373		m->rssi.sampling_period = 0;
 5374	}
 5375}
 5376
 5377static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
 5378				    struct mgmt_adv_pattern *patterns)
 5379{
 5380	u8 offset = 0, length = 0;
 5381	struct adv_pattern *p = NULL;
 5382	int i;
 5383
 5384	for (i = 0; i < pattern_count; i++) {
 5385		offset = patterns[i].offset;
 5386		length = patterns[i].length;
 5387		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
 5388		    length > HCI_MAX_EXT_AD_LENGTH ||
 5389		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
 5390			return MGMT_STATUS_INVALID_PARAMS;
 5391
 5392		p = kmalloc(sizeof(*p), GFP_KERNEL);
 5393		if (!p)
 5394			return MGMT_STATUS_NO_RESOURCES;
 5395
 5396		p->ad_type = patterns[i].ad_type;
 5397		p->offset = patterns[i].offset;
 5398		p->length = patterns[i].length;
 5399		memcpy(p->value, patterns[i].value, p->length);
 5400
 5401		INIT_LIST_HEAD(&p->list);
 5402		list_add(&p->list, &m->patterns);
 5403	}
 5404
 5405	return MGMT_STATUS_SUCCESS;
 5406}
 5407
 5408static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
 5409				    void *data, u16 len)
 5410{
 5411	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
 5412	struct adv_monitor *m = NULL;
 5413	u8 status = MGMT_STATUS_SUCCESS;
 5414	size_t expected_size = sizeof(*cp);
 5415
 5416	BT_DBG("request for %s", hdev->name);
 5417
 5418	if (len <= sizeof(*cp)) {
 5419		status = MGMT_STATUS_INVALID_PARAMS;
 5420		goto done;
 5421	}
 5422
 5423	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
 5424	if (len != expected_size) {
 5425		status = MGMT_STATUS_INVALID_PARAMS;
 5426		goto done;
 5427	}
 5428
 5429	m = kzalloc(sizeof(*m), GFP_KERNEL);
 5430	if (!m) {
 5431		status = MGMT_STATUS_NO_RESOURCES;
 5432		goto done;
 5433	}
 5434
 5435	INIT_LIST_HEAD(&m->patterns);
 5436
 5437	parse_adv_monitor_rssi(m, NULL);
 5438	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
 5439
 5440done:
 5441	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
 5442					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
 5443}
 5444
 5445static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
 5446					 void *data, u16 len)
 5447{
 5448	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
 5449	struct adv_monitor *m = NULL;
 5450	u8 status = MGMT_STATUS_SUCCESS;
 5451	size_t expected_size = sizeof(*cp);
 5452
 5453	BT_DBG("request for %s", hdev->name);
 5454
 5455	if (len <= sizeof(*cp)) {
 5456		status = MGMT_STATUS_INVALID_PARAMS;
 5457		goto done;
 5458	}
 5459
 5460	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
 5461	if (len != expected_size) {
 5462		status = MGMT_STATUS_INVALID_PARAMS;
 5463		goto done;
 5464	}
 5465
 5466	m = kzalloc(sizeof(*m), GFP_KERNEL);
 5467	if (!m) {
 5468		status = MGMT_STATUS_NO_RESOURCES;
 5469		goto done;
 5470	}
 5471
 5472	INIT_LIST_HEAD(&m->patterns);
 5473
 5474	parse_adv_monitor_rssi(m, &cp->rssi);
 5475	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
 5476
 5477done:
 5478	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
 5479					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
 5480}
 5481
/* hci_cmd_sync completion for Remove Adv Monitor: echo the requested
 * handle back to the issuing socket and refresh passive scanning if the
 * removal succeeded.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Both fields are __le16, so copied through without conversion */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
 5504
 5505static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
 5506{
 5507	struct mgmt_pending_cmd *cmd = data;
 5508	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
 5509	u16 handle = __le16_to_cpu(cp->monitor_handle);
 5510
 5511	if (!handle)
 5512		return hci_remove_all_adv_monitor(hdev);
 5513
 5514	return hci_remove_single_adv_monitor(hdev, handle);
 5515}
 5516
/* Remove Adv Monitor (MGMT_OP_REMOVE_ADV_MONITOR) command handler:
 * rejects the request while another monitor/LE operation is pending, then
 * queues the removal; the reply is sent from
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor or LE state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion will not run; drop the pending command here */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
 5562
/* hci_cmd_sync completion for Read Local OOB Data: translate the
 * controller reply (legacy P-192 only, or extended P-192+P-256 when
 * BR/EDR Secure Connections is enabled) into the mgmt response format.
 * cmd->skb may be NULL or an ERR_PTR when the HCI exchange itself failed.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		/* err was success; derive status from the skb state and the
		 * HCI status byte at the start of the reply payload.
		 */
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only P-192 hash/randomizer present */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Truncate the response so the P-256 fields are not sent */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values present */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
 5629
 5630static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
 5631{
 5632	struct mgmt_pending_cmd *cmd = data;
 5633
 5634	if (bredr_sc_enabled(hdev))
 5635		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
 5636	else
 5637		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
 5638
 5639	if (IS_ERR(cmd->skb))
 5640		return PTR_ERR(cmd->skb);
 5641	else
 5642		return 0;
 5643}
 5644
/* Read Local OOB Data (MGMT_OP_READ_LOCAL_OOB_DATA) command handler:
 * requires a powered, SSP-capable controller; the actual data is fetched
 * asynchronously and delivered by read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new (not _add) keeps the command off the pending
	 * list; it is freed by the completion via mgmt_pending_free().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 5686
/* Add Remote OOB Data (MGMT_OP_ADD_REMOTE_OOB_DATA) command handler.
 * Accepts two wire formats distinguished by length: the legacy form with
 * only P-192 hash/randomizer, and the extended form that additionally
 * carries P-256 values. All-zero key material means "no data" for the
 * corresponding key type.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy format: P-192 data only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended format: P-192 and P-256 data */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 5794
 5795static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
 5796				  void *data, u16 len)
 5797{
 5798	struct mgmt_cp_remove_remote_oob_data *cp = data;
 5799	u8 status;
 5800	int err;
 5801
 5802	bt_dev_dbg(hdev, "sock %p", sk);
 5803
 5804	if (cp->addr.type != BDADDR_BREDR)
 5805		return mgmt_cmd_complete(sk, hdev->id,
 5806					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
 5807					 MGMT_STATUS_INVALID_PARAMS,
 5808					 &cp->addr, sizeof(cp->addr));
 5809
 5810	hci_dev_lock(hdev);
 5811
 5812	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
 5813		hci_remote_oob_data_clear(hdev);
 5814		status = MGMT_STATUS_SUCCESS;
 5815		goto done;
 
 
 5816	}
 5817
 5818	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
 5819	if (err < 0)
 5820		status = MGMT_STATUS_INVALID_PARAMS;
 5821	else
 5822		status = MGMT_STATUS_SUCCESS;
 5823
 5824done:
 5825	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
 5826				status, &cp->addr, sizeof(cp->addr));
 5827
 
 5828	hci_dev_unlock(hdev);
 5829	return err;
 5830}
 5831
 5832void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
 5833{
 5834	struct mgmt_pending_cmd *cmd;
 5835
 5836	bt_dev_dbg(hdev, "status %u", status);
 5837
 5838	hci_dev_lock(hdev);
 5839
 5840	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
 5841	if (!cmd)
 5842		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
 5843
 5844	if (!cmd)
 5845		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
 5846
 5847	if (cmd) {
 5848		cmd->cmd_complete(cmd, mgmt_status(status));
 5849		mgmt_pending_remove(cmd);
 5850	}
 5851
 5852	hci_dev_unlock(hdev);
 5853}
 5854
 5855static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
 5856				    uint8_t *mgmt_status)
 5857{
 5858	switch (type) {
 5859	case DISCOV_TYPE_LE:
 5860		*mgmt_status = mgmt_le_support(hdev);
 5861		if (*mgmt_status)
 5862			return false;
 5863		break;
 5864	case DISCOV_TYPE_INTERLEAVED:
 5865		*mgmt_status = mgmt_le_support(hdev);
 5866		if (*mgmt_status)
 5867			return false;
 5868		fallthrough;
 5869	case DISCOV_TYPE_BREDR:
 5870		*mgmt_status = mgmt_bredr_support(hdev);
 5871		if (*mgmt_status)
 5872			return false;
 5873		break;
 5874	default:
 5875		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
 5876		return false;
 5877	}
 5878
 5879	return true;
 5880}
 5881
/* hci_cmd_sync completion for the discovery-start commands: reply to the
 * issuing socket and advance the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already taken off the pending list
	 * (e.g. completed through another path in the meantime).
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply payload is the single discovery-type byte from the request */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
 5900
/* hci_cmd_sync callback: kick off the actual discovery procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
 5905
/* Shared implementation for Start Discovery and Start Limited Discovery:
 * validates power state, current discovery state, discovery type and
 * pause status, then queues the asynchronous start. The final reply is
 * sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 5976
/* Start Discovery (MGMT_OP_START_DISCOVERY) command handler. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
 5983
/* Start Limited Discovery (MGMT_OP_START_LIMITED_DISCOVERY) handler. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
 5991
/* Start Service Discovery (MGMT_OP_START_SERVICE_DISCOVERY) handler:
 * like regular discovery but with result filtering by RSSI threshold and
 * an optional list of 128-bit service UUIDs appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on UUID entries that can fit in a u16-sized payload */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Command length must match header plus uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
 6103
 6104void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
 6105{
 6106	struct mgmt_pending_cmd *cmd;
 
 
 
 
 6107
 6108	bt_dev_dbg(hdev, "status %u", status);
 
 
 
 
 
 
 
 6109
 6110	hci_dev_lock(hdev);
 
 
 6111
 6112	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
 6113	if (cmd) {
 6114		cmd->cmd_complete(cmd, mgmt_status(status));
 6115		mgmt_pending_remove(cmd);
 6116	}
 
 6117
 
 6118	hci_dev_unlock(hdev);
 6119}
 6120
/* hci_cmd_sync completion for Stop Discovery: reply to the issuing socket
 * and, on success, mark discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already taken off the pending list */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply payload is the single discovery-type byte from the request */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
 6137
/* hci_cmd_sync callback: tear down the running discovery procedure. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
 6142
/* Stop Discovery (MGMT_OP_STOP_DISCOVERY) command handler: the requested
 * type must match the active discovery type. The final reply is sent from
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Type must match the one used to start the current session */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 6187
 6188static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
 6189			u16 len)
 6190{
 6191	struct mgmt_cp_confirm_name *cp = data;
 6192	struct inquiry_entry *e;
 6193	int err;
 6194
 6195	bt_dev_dbg(hdev, "sock %p", sk);
 6196
 6197	hci_dev_lock(hdev);
 6198
 6199	if (!hci_discovery_active(hdev)) {
 6200		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
 6201					MGMT_STATUS_FAILED, &cp->addr,
 6202					sizeof(cp->addr));
 6203		goto failed;
 6204	}
 6205
 6206	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
 6207	if (!e) {
 6208		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
 6209					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
 6210					sizeof(cp->addr));
 6211		goto failed;
 6212	}
 6213
 6214	if (cp->name_known) {
 6215		e->name_state = NAME_KNOWN;
 6216		list_del(&e->list);
 6217	} else {
 6218		e->name_state = NAME_NEEDED;
 6219		hci_inquiry_cache_update_resolve(hdev, e);
 6220	}
 6221
 6222	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
 6223				&cp->addr, sizeof(cp->addr));
 6224
 6225failed:
 6226	hci_dev_unlock(hdev);
 6227	return err;
 6228}
 6229
 6230static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
 6231			u16 len)
 6232{
 6233	struct mgmt_cp_block_device *cp = data;
 6234	u8 status;
 6235	int err;
 6236
 6237	bt_dev_dbg(hdev, "sock %p", sk);
 6238
 6239	if (!bdaddr_type_is_valid(cp->addr.type))
 6240		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
 6241					 MGMT_STATUS_INVALID_PARAMS,
 6242					 &cp->addr, sizeof(cp->addr));
 6243
 6244	hci_dev_lock(hdev);
 6245
 6246	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
 6247				  cp->addr.type);
 6248	if (err < 0) {
 6249		status = MGMT_STATUS_FAILED;
 6250		goto done;
 6251	}
 6252
 6253	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
 6254		   sk);
 6255	status = MGMT_STATUS_SUCCESS;
 6256
 6257done:
 6258	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
 6259				&cp->addr, sizeof(cp->addr));
 6260
 6261	hci_dev_unlock(hdev);
 6262
 6263	return err;
 6264}
 6265
 6266static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
 6267			  u16 len)
 6268{
 6269	struct mgmt_cp_unblock_device *cp = data;
 6270	u8 status;
 6271	int err;
 6272
 6273	bt_dev_dbg(hdev, "sock %p", sk);
 6274
 6275	if (!bdaddr_type_is_valid(cp->addr.type))
 6276		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
 6277					 MGMT_STATUS_INVALID_PARAMS,
 6278					 &cp->addr, sizeof(cp->addr));
 6279
 6280	hci_dev_lock(hdev);
 6281
 6282	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
 6283				  cp->addr.type);
 6284	if (err < 0) {
 6285		status = MGMT_STATUS_INVALID_PARAMS;
 6286		goto done;
 6287	}
 6288
 6289	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
 6290		   sk);
 6291	status = MGMT_STATUS_SUCCESS;
 6292
 6293done:
 6294	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
 6295				&cp->addr, sizeof(cp->addr));
 6296
 6297	hci_dev_unlock(hdev);
 6298
 6299	return err;
 6300}
 6301
/* hci_cmd_sync callback: refresh the EIR data after a Device ID change. */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
 6306
 6307static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
 6308			 u16 len)
 6309{
 6310	struct mgmt_cp_set_device_id *cp = data;
 6311	int err;
 6312	__u16 source;
 6313
 6314	bt_dev_dbg(hdev, "sock %p", sk);
 6315
 6316	source = __le16_to_cpu(cp->source);
 6317
 6318	if (source > 0x0002)
 6319		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
 6320				       MGMT_STATUS_INVALID_PARAMS);
 6321
 6322	hci_dev_lock(hdev);
 6323
 6324	hdev->devid_source = source;
 6325	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
 6326	hdev->devid_product = __le16_to_cpu(cp->product);
 6327	hdev->devid_version = __le16_to_cpu(cp->version);
 6328
 6329	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
 6330				NULL, 0);
 6331
 6332	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
 6333
 6334	hci_dev_unlock(hdev);
 6335
 6336	return err;
 6337}
 6338
 6339static void enable_advertising_instance(struct hci_dev *hdev, int err)
 6340{
 6341	if (err)
 6342		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
 6343	else
 6344		bt_dev_dbg(hdev, "status %d", err);
 6345}
 6346
/* Completion handler for MGMT_OP_SET_ADVERTISING: sync the
 * HCI_ADVERTISING setting flag with the controller state, answer all
 * pending commands and, when advertising was just switched off,
 * re-schedule any configured advertising instance.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* On failure only report the status back; flags stay
		 * untouched.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller advertising state into the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference stored in match (if any) */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* Fall back to the first configured instance when no current one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
 6394
/* Sync-queue callback for MGMT_OP_SET_ADVERTISING: program the
 * controller for the requested mode (0x00 = off, 0x01 = on,
 * 0x02 = connectable advertising).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any running instance timeout before reprogramming */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
 6428
/* Handle the MGMT_OP_SET_ADVERTISING command.
 *
 * cp->val may be 0x00 (off), 0x01 (on) or 0x02 (connectable
 * advertising).  When no HCI traffic is needed the setting flags are
 * toggled and a response is sent directly; otherwise the change is
 * queued through the cmd_sync machinery.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Reject changes while advertising is temporarily paused */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation may be in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 6513
 6514static int set_static_address(struct sock *sk, struct hci_dev *hdev,
 6515			      void *data, u16 len)
 6516{
 6517	struct mgmt_cp_set_static_address *cp = data;
 6518	int err;
 6519
 6520	bt_dev_dbg(hdev, "sock %p", sk);
 6521
 6522	if (!lmp_le_capable(hdev))
 6523		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
 6524				       MGMT_STATUS_NOT_SUPPORTED);
 6525
 6526	if (hdev_is_powered(hdev))
 6527		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
 6528				       MGMT_STATUS_REJECTED);
 6529
 6530	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
 6531		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
 6532			return mgmt_cmd_status(sk, hdev->id,
 6533					       MGMT_OP_SET_STATIC_ADDRESS,
 6534					       MGMT_STATUS_INVALID_PARAMS);
 6535
 6536		/* Two most significant bits shall be set */
 6537		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
 6538			return mgmt_cmd_status(sk, hdev->id,
 6539					       MGMT_OP_SET_STATIC_ADDRESS,
 6540					       MGMT_STATUS_INVALID_PARAMS);
 6541	}
 6542
 6543	hci_dev_lock(hdev);
 6544
 6545	bacpy(&hdev->static_addr, &cp->bdaddr);
 6546
 6547	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
 6548	if (err < 0)
 6549		goto unlock;
 6550
 6551	err = new_settings(hdev, sk);
 6552
 6553unlock:
 6554	hci_dev_unlock(hdev);
 6555	return err;
 6556}
 6557
 6558static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
 6559			   void *data, u16 len)
 6560{
 6561	struct mgmt_cp_set_scan_params *cp = data;
 6562	__u16 interval, window;
 6563	int err;
 6564
 6565	bt_dev_dbg(hdev, "sock %p", sk);
 6566
 6567	if (!lmp_le_capable(hdev))
 6568		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
 6569				       MGMT_STATUS_NOT_SUPPORTED);
 6570
 6571	interval = __le16_to_cpu(cp->interval);
 6572
 6573	if (interval < 0x0004 || interval > 0x4000)
 6574		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
 6575				       MGMT_STATUS_INVALID_PARAMS);
 6576
 6577	window = __le16_to_cpu(cp->window);
 6578
 6579	if (window < 0x0004 || window > 0x4000)
 6580		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
 6581				       MGMT_STATUS_INVALID_PARAMS);
 6582
 6583	if (window > interval)
 6584		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
 6585				       MGMT_STATUS_INVALID_PARAMS);
 6586
 6587	hci_dev_lock(hdev);
 6588
 6589	hdev->le_scan_interval = interval;
 6590	hdev->le_scan_window = window;
 6591
 6592	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
 6593				NULL, 0);
 6594
 6595	/* If background scan is running, restart it so new parameters are
 6596	 * loaded.
 6597	 */
 6598	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
 6599	    hdev->discovery.state == DISCOVERY_STOPPED)
 6600		hci_update_passive_scan(hdev);
 6601
 6602	hci_dev_unlock(hdev);
 6603
 6604	return err;
 6605}
 6606
 6607static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
 6608{
 6609	struct mgmt_pending_cmd *cmd = data;
 6610
 6611	bt_dev_dbg(hdev, "err %d", err);
 6612
 6613	if (err) {
 6614		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
 6615				mgmt_status(err));
 6616	} else {
 6617		struct mgmt_mode *cp = cmd->param;
 6618
 6619		if (cp->val)
 6620			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
 6621		else
 6622			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
 6623
 6624		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
 6625		new_settings(hdev, cmd->sk);
 6626	}
 6627
 6628	mgmt_pending_free(cmd);
 6629}
 6630
/* Sync-queue callback for MGMT_OP_SET_FAST_CONNECTABLE: apply the
 * requested mode to the controller's page scan settings.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
 6638
/* Handle the MGMT_OP_SET_FAST_CONNECTABLE command.
 *
 * Requires a BR/EDR enabled controller of at least Bluetooth 1.2.
 * While powered off only the setting flag is toggled; otherwise the
 * change goes through the cmd_sync machinery.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current setting */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs updating */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
 6694
/* Completion handler for MGMT_OP_SET_BREDR queued via cmd_sync */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was allocated with mgmt_pending_new and is owned here */
	mgmt_pending_free(cmd);
}
 6717
 6718static int set_bredr_sync(struct hci_dev *hdev, void *data)
 6719{
 6720	int status;
 6721
 6722	status = hci_write_fast_connectable_sync(hdev, false);
 6723
 6724	if (!status)
 6725		status = hci_update_scan_sync(hdev);
 6726
 6727	/* Since only the advertising data flags will change, there
 6728	 * is no need to update the scan response data.
 6729	 */
 6730	if (!status)
 6731		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
 6732
 6733	return status;
 6734}
 6735
/* Handle the MGMT_OP_SET_BREDR command: toggle BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller.  Disabling while powered is
 * rejected, and re-enabling is rejected for LE-only setups that use a
 * static address or have Secure Connections enabled.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current setting */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Clear all BR/EDR-only settings when BR/EDR goes away */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 6836
/* Completion handler for MGMT_OP_SET_SECURE_CONN: translate the
 * requested mode into the HCI_SC_ENABLED/HCI_SC_ONLY flag pair and
 * notify the caller.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:	/* Secure Connections disabled */
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:	/* Secure Connections enabled */
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:	/* Secure Connections Only mode */
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
 6874
 6875static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
 6876{
 6877	struct mgmt_pending_cmd *cmd = data;
 6878	struct mgmt_mode *cp = cmd->param;
 6879	u8 val = !!cp->val;
 6880
 6881	/* Force write of val */
 6882	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
 6883
 6884	return hci_write_sc_support_sync(hdev, val);
 6885}
 6886
/* Handle the MGMT_OP_SET_SECURE_CONN command.
 *
 * cp->val may be 0x00 (off), 0x01 (on) or 0x02 (Secure Connections
 * Only).  When the controller side cannot or need not be programmed
 * (powered off, no SC support, BR/EDR disabled) only the flags are
 * toggled; otherwise the write is queued through cmd_sync.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled, SC requires SSP to be enabled as well */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just acknowledge the current setting */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
 6967
/* Handle the MGMT_OP_SET_DEBUG_KEYS command.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep them
 * (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally generate debug keys
 * (HCI_USE_DEBUG_KEYS + controller SSP debug mode).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Toggle the controller's SSP debug mode when powered with SSP on */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if a flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 7014
/* Handle the MGMT_OP_SET_PRIVACY command.
 *
 * cp->privacy: 0x00 = off, 0x01 = privacy (RPAs), 0x02 = limited
 * privacy.  The local IRK from the request is stored in hdev->irk.
 * Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA with the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if a flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 7071
 7072static bool irk_is_valid(struct mgmt_irk_info *irk)
 7073{
 7074	switch (irk->addr.type) {
 7075	case BDADDR_LE_PUBLIC:
 7076		return true;
 7077
 7078	case BDADDR_LE_RANDOM:
 7079		/* Two most significant bits shall be set */
 7080		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
 7081			return false;
 7082		return true;
 7083	}
 7084
 7085	return false;
 7086}
 7087
/* Handle the MGMT_OP_LOAD_IRKS command: replace the whole set of
 * stored Identity Resolving Keys with the list from user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on the count such that the payload fits in u16 len */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs itself */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
 7163
 7164static bool ltk_is_valid(struct mgmt_ltk_info *key)
 7165{
 7166	if (key->initiator != 0x00 && key->initiator != 0x01)
 7167		return false;
 7168
 7169	switch (key->addr.type) {
 7170	case BDADDR_LE_PUBLIC:
 7171		return true;
 7172
 7173	case BDADDR_LE_RANDOM:
 7174		/* Two most significant bits shall be set */
 7175		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
 7176			return false;
 7177		return true;
 7178	}
 7179
 7180	return false;
 7181}
 7182
/* Handle the MGMT_OP_LOAD_LONG_TERM_KEYS command: replace the whole
 * set of stored SMP Long Term Keys with the list from user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on the count such that the payload fits in u16 len */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are never stored: fall through and
			 * skip them like any unknown type.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
 7283
 7284static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
 7285{
 7286	struct mgmt_pending_cmd *cmd = data;
 7287	struct hci_conn *conn = cmd->user_data;
 7288	struct mgmt_cp_get_conn_info *cp = cmd->param;
 7289	struct mgmt_rp_get_conn_info rp;
 7290	u8 status;
 7291
 7292	bt_dev_dbg(hdev, "err %d", err);
 7293
 7294	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
 7295
 7296	status = mgmt_status(err);
 7297	if (status == MGMT_STATUS_SUCCESS) {
 7298		rp.rssi = conn->rssi;
 7299		rp.tx_power = conn->tx_power;
 7300		rp.max_tx_power = conn->max_tx_power;
 7301	} else {
 7302		rp.rssi = HCI_RSSI_INVALID;
 7303		rp.tx_power = HCI_TX_POWER_INVALID;
 7304		rp.max_tx_power = HCI_TX_POWER_INVALID;
 7305	}
 7306
 7307	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
 7308			  &rp, sizeof(rp));
 7309
 7310	mgmt_pending_free(cmd);
 7311}
 7312
/* Sync-queue callback for MGMT_OP_GET_CONN_INFO: refresh the cached
 * RSSI and TX power values of the requested connection.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
 7350
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection.
 *
 * The reply is pre-filled with the requested address so that error
 * completions still echo it back to userspace.  If the cached values in
 * hci_conn are recent enough they are returned directly; otherwise a
 * refresh is queued on the cmd_sync work queue and the reply is sent
 * from get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR and LE connections are tracked under different link types
	 * in the connection hash.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		/* mgmt_pending_new() copies the request so it survives until
		 * the completion callback frees it.
		 */
		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			/* Queueing failed: reply with FAILED here and drop
			 * the pending command ourselves.
			 */
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Stamp now (still under hdev lock) so concurrent requests
		 * reuse the refresh that has just been queued.
		 */
		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 7441
 7442static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
 7443{
 7444	struct mgmt_pending_cmd *cmd = data;
 7445	struct mgmt_cp_get_clock_info *cp = cmd->param;
 7446	struct mgmt_rp_get_clock_info rp;
 7447	struct hci_conn *conn = cmd->user_data;
 7448	u8 status = mgmt_status(err);
 7449
 7450	bt_dev_dbg(hdev, "err %d", err);
 7451
 7452	memset(&rp, 0, sizeof(rp));
 7453	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
 7454	rp.addr.type = cp->addr.type;
 7455
 7456	if (err)
 7457		goto complete;
 7458
 7459	rp.local_clock = cpu_to_le32(hdev->clock);
 7460
 7461	if (conn) {
 7462		rp.piconet_clock = cpu_to_le32(conn->clock);
 7463		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
 7464	}
 7465
 7466complete:
 7467	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
 7468			  sizeof(rp));
 7469
 7470	mgmt_pending_free(cmd);
 7471}
 7472
 7473static int get_clock_info_sync(struct hci_dev *hdev, void *data)
 7474{
 7475	struct mgmt_pending_cmd *cmd = data;
 7476	struct mgmt_cp_get_clock_info *cp = cmd->param;
 7477	struct hci_cp_read_clock hci_cp;
 7478	struct hci_conn *conn;
 7479
 7480	memset(&hci_cp, 0, sizeof(hci_cp));
 7481	hci_read_clock_sync(hdev, &hci_cp);
 7482
 7483	/* Make sure connection still exists */
 7484	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
 7485	if (!conn || conn->state != BT_CONNECTED)
 7486		return MGMT_STATUS_NOT_CONNECTED;
 7487
 7488	cmd->user_data = conn;
 7489	hci_cp.handle = cpu_to_le16(conn->handle);
 7490	hci_cp.which = 0x01; /* Piconet clock */
 7491
 7492	return hci_read_clock_sync(hdev, &hci_cp);
 7493}
 7494
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only).
 *
 * BDADDR_ANY requests just the local clock (conn stays NULL); a specific
 * address additionally requires a live ACL connection whose piconet
 * clock will be read.  The actual HCI traffic happens asynchronously in
 * get_clock_info_sync()/get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply so error completions echo the address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		/* BDADDR_ANY: local clock only */
		conn = NULL;
	}

	/* mgmt_pending_new() copies the request; the completion callback
	 * frees the pending command after replying.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		/* Queueing failed: reply here and drop the pending command */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
 7558
 7559static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
 7560{
 7561	struct hci_conn *conn;
 7562
 7563	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
 7564	if (!conn)
 7565		return false;
 7566
 7567	if (conn->dst_type != type)
 7568		return false;
 7569
 7570	if (conn->state != BT_CONNECTED)
 7571		return false;
 7572
 7573	return true;
 7574}
 7575
/* This function requires the caller holds hdev->lock */
/* Set (or update) the auto-connect policy for an LE peer, creating the
 * conn_params entry if needed, and move the entry onto the matching
 * pending list (pend_le_conns / pend_le_reports).
 *
 * Returns 0 on success or -EIO if the entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* hci_conn_params_add() returns an existing entry if one is there */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whatever pending list the entry is currently on
	 * before re-filing it below.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-flight explicit connect takes precedence over
		 * passive-scan reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
 7620
 7621static void device_added(struct sock *sk, struct hci_dev *hdev,
 7622			 bdaddr_t *bdaddr, u8 type, u8 action)
 7623{
 7624	struct mgmt_ev_device_added ev;
 7625
 7626	bacpy(&ev.addr.bdaddr, bdaddr);
 7627	ev.addr.type = type;
 7628	ev.action = action;
 7629
 7630	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
 7631}
 7632
/* cmd_sync work for MGMT_OP_ADD_DEVICE: re-evaluate passive scanning so
 * the new entry takes effect (data/err callback are unused).
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
 7637
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * action: 0x00 = background scan for device (LE only),
 *         0x01 = allow incoming connection,
 *         0x02 = auto-connect remote device.
 * BR/EDR addresses go on the accept list (incoming only); LE addresses
 * get conn_params with the matching auto-connect policy and passive
 * scanning is re-evaluated.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need enabling for the new accept-list entry */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Fetch current per-device flags for the change event below */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	/* NULL skip_sk: the originating socket gets this event too */
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 7739
 7740static void device_removed(struct sock *sk, struct hci_dev *hdev,
 7741			   bdaddr_t *bdaddr, u8 type)
 7742{
 7743	struct mgmt_ev_device_removed ev;
 7744
 7745	bacpy(&ev.addr.bdaddr, bdaddr);
 7746	ev.addr.type = type;
 7747
 7748	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
 7749}
 7750
/* cmd_sync work for MGMT_OP_REMOVE_DEVICE: re-evaluate passive scanning
 * after entries were removed (data/err callback are unused).
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
 7755
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address: remove the matching accept-list entry
 * (BR/EDR) or connection parameters (LE).  With BDADDR_ANY (and
 * addr.type 0): flush every accept-list entry and all removable LE
 * connection parameters.  Passive scanning is re-evaluated afterwards.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Entry was not on the accept list */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may no longer be needed */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED/EXPLICIT entries were not created via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires addr.type to be 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Flush LE conn params, preserving entries the kernel still
		 * needs: DISABLED ones were never user-added, and explicit
		 * connects in flight are downgraded instead of freed.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
 7879
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection
 * parameters (typically at daemon startup).
 *
 * Disabled entries are cleared first; individual invalid entries are
 * skipped with an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared entry count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types make sense here; skip anything else */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the Core Specification limits */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Reuses an existing entry when one is already present */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
 7964
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller
 * configuration is provided externally (only meaningful for devices with
 * HCI_QUIRK_EXTERNAL_CONFIG, and only while powered off).
 *
 * When the configured/unconfigured state flips as a result, the index is
 * re-announced so userspace sees it move between the configured and
 * unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Track whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The UNCONFIGURED flag no longer matches reality: re-announce the
	 * index under its new configured/unconfigured identity.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Now configured: bring it up like a fresh device */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Now unconfigured: expose it as a raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 8020
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: stage a public address to be
 * programmed into the controller (via hdev->set_bdaddr) on next power-on.
 *
 * Only permitted while powered off and when the driver provides a
 * set_bdaddr hook.  If setting the address completes the device's
 * configuration, the index is re-announced as a configured controller
 * and powered up.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Device just became fully configured: move the index to the
		 * configured list and power it on.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
 8072
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req().
 *
 * Parses the controller reply into EIR-formatted hash/randomizer fields,
 * completes the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command, and on
 * success broadcasts MGMT_EV_LOCAL_OOB_DATA_UPDATED to sockets that
 * opted into OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command was already taken over/cancelled */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		/* No transport error: derive status from the HCI reply */
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm
		 * before relying on exact error codes here.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (P-192 only) OOB data */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev (5) + hash TLV (18) + rand TLV (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 data, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On error eir_len is 0; send the reply without EIR payload */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Broadcast to other listeners, skipping the requesting socket */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
 8195
 8196static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
 8197				  struct mgmt_cp_read_local_oob_ext_data *cp)
 8198{
 8199	struct mgmt_pending_cmd *cmd;
 8200	int err;
 8201
 8202	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
 8203			       cp, sizeof(*cp));
 8204	if (!cmd)
 8205		return -ENOMEM;
 8206
 8207	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
 8208				 read_local_oob_ext_data_complete);
 8209
 8210	if (err < 0) {
 8211		mgmt_pending_remove(cmd);
 8212		return err;
 8213	}
 8214
 8215	return 0;
 8216}
 8217
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * cp->type selects the transport: BIT(BDADDR_BREDR) for BR/EDR, or
 * BIT(BDADDR_LE_PUBLIC)|BIT(BDADDR_LE_RANDOM) for LE.  BR/EDR data with
 * SSP enabled is fetched from the controller asynchronously; LE data is
 * assembled in place from the SMP OOB generator and local state.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the request and size the worst-case reply */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs controller interaction; reply comes later
			 * from read_local_oob_ext_data_complete().
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			/* Without SSP only class-of-device is available */
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static (addr[6] = 0x01) vs public (0x00) address */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Let other opted-in sockets know the local OOB data changed */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
 8378
 8379static u32 get_supported_adv_flags(struct hci_dev *hdev)
 8380{
 8381	u32 flags = 0;
 8382
 8383	flags |= MGMT_ADV_FLAG_CONNECTABLE;
 8384	flags |= MGMT_ADV_FLAG_DISCOV;
 8385	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
 8386	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
 8387	flags |= MGMT_ADV_FLAG_APPEARANCE;
 8388	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
 8389	flags |= MGMT_ADV_PARAM_DURATION;
 8390	flags |= MGMT_ADV_PARAM_TIMEOUT;
 8391	flags |= MGMT_ADV_PARAM_INTERVALS;
 8392	flags |= MGMT_ADV_PARAM_TX_POWER;
 8393	flags |= MGMT_ADV_PARAM_SCAN_RSP;
 8394
 8395	/* In extended adv TX_POWER returned from Set Adv Param
 8396	 * will be always valid.
 8397	 */
 8398	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
 8399		flags |= MGMT_ADV_FLAG_TX_POWER;
 8400
 8401	if (ext_adv_capable(hdev)) {
 8402		flags |= MGMT_ADV_FLAG_SEC_1M;
 8403		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
 8404		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
 8405
 8406		if (le_2m_capable(hdev))
 8407			flags |= MGMT_ADV_FLAG_SEC_2M;
 8408
 8409		if (le_coded_capable(hdev))
 8410			flags |= MGMT_ADV_FLAG_SEC_CODED;
 8411	}
 8412
 8413	return flags;
 8414}
 8415
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-size limits, and the list of existing instance IDs.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per advertising instance ID */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comparison uses adv_instance_cnt while the
		 * comment above says le_num_of_adv_sets — confirm which bound
		 * is intended before changing either.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
 8470
 8471static u8 calculate_name_len(struct hci_dev *hdev)
 8472{
 8473	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
 8474
 8475	return eir_append_local_name(hdev, buf, 0);
 8476}
 8477
 8478static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
 8479			   bool is_adv_data)
 8480{
 8481	u8 max_len = max_adv_len(hdev);
 8482
 8483	if (is_adv_data) {
 8484		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
 8485				 MGMT_ADV_FLAG_LIMITED_DISCOV |
 8486				 MGMT_ADV_FLAG_MANAGED_FLAGS))
 8487			max_len -= 3;
 8488
 8489		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
 8490			max_len -= 3;
 8491	} else {
 8492		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
 8493			max_len -= calculate_name_len(hdev);
 8494
 8495		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
 8496			max_len -= 4;
 8497	}
 8498
 8499	return max_len;
 8500}
 8501
 8502static bool flags_managed(u32 adv_flags)
 8503{
 8504	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
 8505			    MGMT_ADV_FLAG_LIMITED_DISCOV |
 8506			    MGMT_ADV_FLAG_MANAGED_FLAGS);
 8507}
 8508
 8509static bool tx_power_managed(u32 adv_flags)
 8510{
 8511	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
 8512}
 8513
 8514static bool name_managed(u32 adv_flags)
 8515{
 8516	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
 8517}
 8518
 8519static bool appearance_managed(u32 adv_flags)
 8520{
 8521	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
 8522}
 8523
 8524static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
 8525			      u8 len, bool is_adv_data)
 8526{
 8527	int i, cur_len;
 8528	u8 max_len;
 8529
 8530	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
 8531
 8532	if (len > max_len)
 8533		return false;
 8534
 8535	/* Make sure that the data is correctly formatted. */
 8536	for (i = 0; i < len; i += (cur_len + 1)) {
 8537		cur_len = data[i];
 8538
 8539		if (!cur_len)
 8540			continue;
 8541
 8542		if (data[i + 1] == EIR_FLAGS &&
 8543		    (!is_adv_data || flags_managed(adv_flags)))
 8544			return false;
 8545
 8546		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
 8547			return false;
 8548
 8549		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
 8550			return false;
 8551
 8552		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
 8553			return false;
 8554
 8555		if (data[i + 1] == EIR_APPEARANCE &&
 8556		    appearance_managed(adv_flags))
 8557			return false;
 8558
 8559		/* If the current field length would exceed the total data
 8560		 * length, then it's invalid.
 8561		 */
 8562		if (i + cur_len >= len)
 8563			return false;
 8564	}
 8565
 8566	return true;
 8567}
 8568
 8569static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
 8570{
 8571	u32 supported_flags, phy_flags;
 8572
 8573	/* The current implementation only supports a subset of the specified
 8574	 * flags. Also need to check mutual exclusiveness of sec flags.
 8575	 */
 8576	supported_flags = get_supported_adv_flags(hdev);
 8577	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
 8578	if (adv_flags & ~supported_flags ||
 8579	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
 8580		return false;
 8581
 8582	return true;
 8583}
 8584
 8585static bool adv_busy(struct hci_dev *hdev)
 8586{
 8587	return pending_find(MGMT_OP_SET_LE, hdev);
 8588}
 
 8589
 8590static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
 8591			     int err)
 8592{
 8593	struct adv_info *adv, *n;
 8594
 8595	bt_dev_dbg(hdev, "err %d", err);
 8596
 8597	hci_dev_lock(hdev);
 8598
 8599	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
 8600		u8 instance;
 8601
 8602		if (!adv->pending)
 8603			continue;
 8604
 8605		if (!err) {
 8606			adv->pending = false;
 8607			continue;
 8608		}
 8609
 8610		instance = adv->instance;
 8611
 8612		if (hdev->cur_adv_instance == instance)
 8613			cancel_adv_timeout(hdev);
 8614
 8615		hci_remove_adv_instance(hdev, instance);
 8616		mgmt_advertising_removed(sk, hdev, instance);
 
 8617	}
 8618
 8619	hci_dev_unlock(hdev);
 8620}
 8621
 8622static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
 8623{
 8624	struct mgmt_pending_cmd *cmd = data;
 8625	struct mgmt_cp_add_advertising *cp = cmd->param;
 8626	struct mgmt_rp_add_advertising rp;
 8627
 8628	memset(&rp, 0, sizeof(rp));
 8629
 8630	rp.instance = cp->instance;
 8631
 8632	if (err)
 8633		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
 8634				mgmt_status(err));
 8635	else
 8636		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
 8637				  mgmt_status(err), &rp, sizeof(rp));
 8638
 8639	add_adv_complete(hdev, cmd->sk, cp->instance, err);
 8640
 8641	mgmt_pending_free(cmd);
 8642}
 8643
 8644static int add_advertising_sync(struct hci_dev *hdev, void *data)
 8645{
 8646	struct mgmt_pending_cmd *cmd = data;
 8647	struct mgmt_cp_add_advertising *cp = cmd->param;
 8648
 8649	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
 8650}
 8651
/* MGMT_OP_ADD_ADVERTISING: register (or replace) advertising instance
 * cp->instance with the supplied adv/scan-rsp data and schedule it.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Variable-length command: header plus exactly both data blobs */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs the timer machinery, i.e. a powered adapter */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Both blobs must be well-formed TLV sequences within the limits */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Pass the instance that actually needs (re)scheduling to the sync
	 * worker via the copied command parameters.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
 8786
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_PARAMS: report the
 * selected TX power and remaining data space, or tear the instance
 * down again on error.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already have been removed meanwhile */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)	/* cmd cannot be NULL here; defensive check only */
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
 8837
 8838static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
 8839{
 8840	struct mgmt_pending_cmd *cmd = data;
 8841	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
 8842
 8843	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
 8844}
 8845
/* MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the two-call extended
 * advertising interface. Creates an instance with the requested
 * parameters but no data; data follows via MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no controller round-trip is needed,
		 * so answer immediately with the defaults.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
 8961
 8962static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
 8963{
 8964	struct mgmt_pending_cmd *cmd = data;
 8965	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
 8966	struct mgmt_rp_add_advertising rp;
 8967
 8968	add_adv_complete(hdev, cmd->sk, cp->instance, err);
 8969
 8970	memset(&rp, 0, sizeof(rp));
 8971
 8972	rp.instance = cp->instance;
 8973
 8974	if (err)
 8975		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
 8976				mgmt_status(err));
 8977	else
 8978		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
 8979				  mgmt_status(err), &rp, sizeof(rp));
 8980
 8981	mgmt_pending_free(cmd);
 8982}
 8983
 8984static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
 8985{
 8986	struct mgmt_pending_cmd *cmd = data;
 8987	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
 8988	int err;
 8989
 8990	if (ext_adv_capable(hdev)) {
 8991		err = hci_update_adv_data_sync(hdev, cp->instance);
 8992		if (err)
 8993			return err;
 8994
 8995		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
 8996		if (err)
 8997			return err;
 8998
 8999		return hci_enable_ext_advertising_sync(hdev, cp->instance);
 9000	}
 9001
 9002	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
 9003}
 9004
/* MGMT_OP_ADD_EXT_ADV_DATA: second half of the two-call extended
 * advertising interface. Attaches adv/scan-rsp data to the instance
 * created by MGMT_OP_ADD_EXT_ADV_PARAMS and schedules it. On failure the
 * half-created instance is removed again (clear_new_instance path).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The params call must have created the instance first */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* First data for a pending instance: announce it now */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
 9123
 9124static void remove_advertising_complete(struct hci_dev *hdev, void *data,
 9125					int err)
 9126{
 9127	struct mgmt_pending_cmd *cmd = data;
 9128	struct mgmt_cp_remove_advertising *cp = cmd->param;
 9129	struct mgmt_rp_remove_advertising rp;
 9130
 9131	bt_dev_dbg(hdev, "err %d", err);
 9132
 9133	memset(&rp, 0, sizeof(rp));
 9134	rp.instance = cp->instance;
 9135
 9136	if (err)
 9137		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
 9138				mgmt_status(err));
 9139	else
 9140		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
 9141				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
 9142
 9143	mgmt_pending_free(cmd);
 9144}
 9145
 9146static int remove_advertising_sync(struct hci_dev *hdev, void *data)
 9147{
 9148	struct mgmt_pending_cmd *cmd = data;
 9149	struct mgmt_cp_remove_advertising *cp = cmd->param;
 9150	int err;
 9151
 9152	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
 9153	if (err)
 9154		return err;
 9155
 9156	if (list_empty(&hdev->adv_instances))
 9157		err = hci_disable_advertising_sync(hdev);
 9158
 9159	return err;
 9160}
 9161
/* MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance
 * (cp->instance != 0) or all registered instances (cp->instance == 0).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A specific instance must actually exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Advertising state must not change while Set LE is in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered at all: nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
 9209
 9210static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
 9211			     void *data, u16 data_len)
 9212{
 9213	struct mgmt_cp_get_adv_size_info *cp = data;
 9214	struct mgmt_rp_get_adv_size_info rp;
 9215	u32 flags, supported_flags;
 9216
 9217	bt_dev_dbg(hdev, "sock %p", sk);
 9218
 9219	if (!lmp_le_capable(hdev))
 9220		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
 9221				       MGMT_STATUS_REJECTED);
 9222
 9223	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
 9224		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
 9225				       MGMT_STATUS_INVALID_PARAMS);
 9226
 9227	flags = __le32_to_cpu(cp->flags);
 9228
 9229	/* The current implementation only supports a subset of the specified
 9230	 * flags.
 9231	 */
 9232	supported_flags = get_supported_adv_flags(hdev);
 9233	if (flags & ~supported_flags)
 9234		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
 9235				       MGMT_STATUS_INVALID_PARAMS);
 9236
 9237	rp.instance = cp->instance;
 9238	rp.flags = cp->flags;
 9239	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
 9240	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
 9241
 9242	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
 9243				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
 9244}
 9245
/* Dispatch table for MGMT commands, indexed by opcode. Each entry names
 * the handler, the expected (minimum) parameter size, and option flags:
 * HCI_MGMT_VAR_LEN (size is a minimum), HCI_MGMT_NO_HDEV (no controller
 * index required), HCI_MGMT_UNTRUSTED (allowed on untrusted sockets),
 * HCI_MGMT_UNCONFIGURED (allowed on unconfigured controllers),
 * HCI_MGMT_HDEV_OPTIONAL (index may be omitted).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
 9379
/* Announce a newly registered controller index to management sockets.
 *
 * Emits the legacy (Unconf) Index Added event plus the extended
 * Ext Index Added event carrying the controller type and bus.
 * Controllers marked HCI_QUIRK_RAW_DEVICE are never exposed over mgmt.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		/* Unknown controller types are not announced at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
 9411
/* Announce removal of a controller index to management sockets.
 *
 * Pending commands on a primary controller are completed with
 * MGMT_STATUS_INVALID_INDEX before the removal events are sent.
 * Finally, any mgmt-scheduled delayed work is cancelled synchronously
 * so it cannot run against the disappearing device.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Flush every outstanding command for this index first */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
 9453
/* Finish a power-on cycle: on success re-arm LE auto-connect actions and
 * passive scanning, then complete pending Set Powered commands and push a
 * New Settings event. All of this runs under the hdev lock.
 *
 * @err: result of the power-on attempt (0 on success)
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp stashes the issuer's socket in match.sk so the
	 * New Settings event below can skip echoing back to it.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
 9476
/* Notify mgmt users that the controller has powered down.
 *
 * Completes pending Set Powered commands, fails every other pending
 * command with an appropriate status, reports the class of device
 * reverting to zero (BR/EDR) and emits New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class change if it was non-zero to begin with */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
 9510
 9511void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
 9512{
 9513	struct mgmt_pending_cmd *cmd;
 9514	u8 status;
 9515
 9516	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
 9517	if (!cmd)
 9518		return;
 9519
 9520	if (err == -ERFKILL)
 9521		status = MGMT_STATUS_RFKILLED;
 9522	else
 9523		status = MGMT_STATUS_FAILED;
 9524
 9525	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
 
 
 9526
 9527	mgmt_pending_remove(cmd);
 9528}
 9529
 9530void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 9531		       bool persistent)
 9532{
 9533	struct mgmt_ev_new_link_key ev;
 9534
 9535	memset(&ev, 0, sizeof(ev));
 9536
 9537	ev.store_hint = persistent;
 9538	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
 9539	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
 9540	ev.key.type = key->type;
 9541	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
 9542	ev.key.pin_len = key->pin_len;
 9543
 9544	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 9545}
 9546
 9547static u8 mgmt_ltk_type(struct smp_ltk *ltk)
 9548{
 9549	switch (ltk->type) {
 9550	case SMP_LTK:
 9551	case SMP_LTK_RESPONDER:
 9552		if (ltk->authenticated)
 9553			return MGMT_LTK_AUTHENTICATED;
 9554		return MGMT_LTK_UNAUTHENTICATED;
 9555	case SMP_LTK_P256:
 9556		if (ltk->authenticated)
 9557			return MGMT_LTK_P256_AUTH;
 9558		return MGMT_LTK_P256_UNAUTH;
 9559	case SMP_LTK_P256_DEBUG:
 9560		return MGMT_LTK_P256_DEBUG;
 9561	}
 9562
 9563	return MGMT_LTK_UNAUTHENTICATED;
 9564}
 9565
 9566void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
 9567{
 9568	struct mgmt_ev_new_long_term_key ev;
 9569
 9570	memset(&ev, 0, sizeof(ev));
 9571
 9572	/* Devices using resolvable or non-resolvable random addresses
 9573	 * without providing an identity resolving key don't require
 9574	 * to store long term keys. Their addresses will change the
 9575	 * next time around.
 9576	 *
 9577	 * Only when a remote device provides an identity address
 9578	 * make sure the long term key is stored. If the remote
 9579	 * identity is known, the long term keys are internally
 9580	 * mapped to the identity address. So allow static random
 9581	 * and public addresses here.
 9582	 */
 9583	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
 9584	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
 9585		ev.store_hint = 0x00;
 9586	else
 9587		ev.store_hint = persistent;
 9588
 9589	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
 9590	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
 9591	ev.key.type = mgmt_ltk_type(key);
 9592	ev.key.enc_size = key->enc_size;
 9593	ev.key.ediv = key->ediv;
 9594	ev.key.rand = key->rand;
 9595
 9596	if (key->type == SMP_LTK)
 9597		ev.key.initiator = 1;
 9598
 9599	/* Make sure we copy only the significant bytes based on the
 9600	 * encryption key size, and set the rest of the value to zeroes.
 9601	 */
 9602	memcpy(ev.key.val, key->val, key->enc_size);
 9603	memset(ev.key.val + key->enc_size, 0,
 9604	       sizeof(ev.key.val) - key->enc_size);
 9605
 9606	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
 9607}
 9608
 9609void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
 9610{
 9611	struct mgmt_ev_new_irk ev;
 9612
 9613	memset(&ev, 0, sizeof(ev));
 9614
 9615	ev.store_hint = persistent;
 9616
 9617	bacpy(&ev.rpa, &irk->rpa);
 9618	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
 9619	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
 9620	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
 9621
 9622	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
 9623}
 9624
 9625void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
 9626		   bool persistent)
 9627{
 9628	struct mgmt_ev_new_csrk ev;
 9629
 9630	memset(&ev, 0, sizeof(ev));
 9631
 9632	/* Devices using resolvable or non-resolvable random addresses
 9633	 * without providing an identity resolving key don't require
 9634	 * to store signature resolving keys. Their addresses will change
 9635	 * the next time around.
 9636	 *
 9637	 * Only when a remote device provides an identity address
 9638	 * make sure the signature resolving key is stored. So allow
 9639	 * static random and public addresses here.
 9640	 */
 9641	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
 9642	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
 9643		ev.store_hint = 0x00;
 9644	else
 9645		ev.store_hint = persistent;
 9646
 9647	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
 9648	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
 9649	ev.key.type = csrk->type;
 9650	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
 9651
 9652	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
 9653}
 9654
 9655void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9656			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
 9657			 u16 max_interval, u16 latency, u16 timeout)
 9658{
 9659	struct mgmt_ev_new_conn_param ev;
 9660
 9661	if (!hci_is_identity_address(bdaddr, bdaddr_type))
 9662		return;
 9663
 9664	memset(&ev, 0, sizeof(ev));
 9665	bacpy(&ev.addr.bdaddr, bdaddr);
 9666	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
 9667	ev.store_hint = store_hint;
 9668	ev.min_interval = cpu_to_le16(min_interval);
 9669	ev.max_interval = cpu_to_le16(max_interval);
 9670	ev.latency = cpu_to_le16(latency);
 9671	ev.timeout = cpu_to_le16(timeout);
 9672
 9673	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
 9674}
 9675
 9676void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
 9677			   u8 *name, u8 name_len)
 
 9678{
 9679	struct sk_buff *skb;
 9680	struct mgmt_ev_device_connected *ev;
 9681	u16 eir_len = 0;
 9682	u32 flags = 0;
 9683
 9684	/* allocate buff for LE or BR/EDR adv */
 9685	if (conn->le_adv_data_len > 0)
 9686		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
 9687				     sizeof(*ev) + conn->le_adv_data_len);
 9688	else
 9689		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
 9690				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
 9691				     eir_precalc_len(sizeof(conn->dev_class)));
 9692
 9693	ev = skb_put(skb, sizeof(*ev));
 9694	bacpy(&ev->addr.bdaddr, &conn->dst);
 9695	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
 9696
 9697	if (conn->out)
 9698		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
 9699
 9700	ev->flags = __cpu_to_le32(flags);
 9701
 9702	/* We must ensure that the EIR Data fields are ordered and
 9703	 * unique. Keep it simple for now and avoid the problem by not
 9704	 * adding any BR/EDR data to the LE adv.
 9705	 */
 9706	if (conn->le_adv_data_len > 0) {
 9707		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
 9708		eir_len = conn->le_adv_data_len;
 9709	} else {
 9710		if (name)
 9711			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
 9712
 9713		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
 9714			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
 9715						    conn->dev_class, sizeof(conn->dev_class));
 9716	}
 9717
 9718	ev->eir_len = cpu_to_le16(eir_len);
 9719
 9720	mgmt_event_skb(skb, NULL);
 
 9721}
 9722
 9723static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
 9724{
 
 9725	struct sock **sk = data;
 
 
 
 
 9726
 9727	cmd->cmd_complete(cmd, 0);
 
 9728
 9729	*sk = cmd->sk;
 9730	sock_hold(*sk);
 9731
 9732	mgmt_pending_remove(cmd);
 9733}
 9734
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command. The Device Unpaired event is emitted before the command
 * response so observers see the state change first.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
 9745
 9746bool mgmt_powering_down(struct hci_dev *hdev)
 9747{
 9748	struct mgmt_pending_cmd *cmd;
 9749	struct mgmt_mode *cp;
 9750
 9751	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
 9752	if (!cmd)
 9753		return false;
 9754
 9755	cp = cmd->param;
 9756	if (!cp->val)
 9757		return true;
 9758
 9759	return false;
 9760}
 9761
/* Handle a connection going away.
 *
 * Completes any pending Disconnect command (routing the Device
 * Disconnected event away from its issuer's socket), emits the event,
 * then answers pending Unpair Device commands. If a power-off is
 * pending and this was the last connection, the deferred power-off
 * work is kicked immediately.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections previously reported to userspace get an event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
 9801
 9802void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9803			    u8 link_type, u8 addr_type, u8 status)
 9804{
 9805	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
 9806	struct mgmt_cp_disconnect *cp;
 9807	struct mgmt_pending_cmd *cmd;
 9808
 9809	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
 9810			     hdev);
 9811
 9812	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
 9813	if (!cmd)
 9814		return;
 9815
 9816	cp = cmd->param;
 
 9817
 9818	if (bacmp(bdaddr, &cp->addr.bdaddr))
 9819		return;
 9820
 9821	if (cp->addr.type != bdaddr_type)
 9822		return;
 9823
 9824	cmd->cmd_complete(cmd, mgmt_status(status));
 9825	mgmt_pending_remove(cmd);
 
 
 
 
 9826}
 9827
 9828void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 9829			 u8 addr_type, u8 status)
 9830{
 9831	struct mgmt_ev_connect_failed ev;
 9832
 9833	/* The connection is still in hci_conn_hash so test for 1
 9834	 * instead of 0 to know if this is the last one.
 9835	 */
 9836	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
 9837		cancel_delayed_work(&hdev->power_off);
 9838		queue_work(hdev->req_workqueue, &hdev->power_off.work);
 9839	}
 9840
 9841	bacpy(&ev.addr.bdaddr, bdaddr);
 9842	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 9843	ev.status = mgmt_status(status);
 9844
 9845	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 9846}
 9847
 9848void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 9849{
 9850	struct mgmt_ev_pin_code_request ev;
 9851
 9852	bacpy(&ev.addr.bdaddr, bdaddr);
 9853	ev.addr.type = BDADDR_BREDR;
 9854	ev.secure = secure;
 9855
 9856	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
 
 9857}
 9858
 9859void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9860				  u8 status)
 9861{
 9862	struct mgmt_pending_cmd *cmd;
 
 
 9863
 9864	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
 9865	if (!cmd)
 9866		return;
 
 
 
 
 
 
 9867
 9868	cmd->cmd_complete(cmd, mgmt_status(status));
 9869	mgmt_pending_remove(cmd);
 
 
 9870}
 9871
 9872void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9873				      u8 status)
 9874{
 9875	struct mgmt_pending_cmd *cmd;
 
 
 9876
 9877	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
 9878	if (!cmd)
 9879		return;
 
 
 
 
 
 
 9880
 9881	cmd->cmd_complete(cmd, mgmt_status(status));
 9882	mgmt_pending_remove(cmd);
 
 
 9883}
 9884
 9885int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9886			      u8 link_type, u8 addr_type, u32 value,
 9887			      u8 confirm_hint)
 9888{
 9889	struct mgmt_ev_user_confirm_request ev;
 9890
 9891	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
 9892
 9893	bacpy(&ev.addr.bdaddr, bdaddr);
 9894	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 9895	ev.confirm_hint = confirm_hint;
 9896	ev.value = cpu_to_le32(value);
 9897
 9898	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
 9899			  NULL);
 9900}
 9901
 9902int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9903			      u8 link_type, u8 addr_type)
 9904{
 9905	struct mgmt_ev_user_passkey_request ev;
 9906
 9907	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
 9908
 9909	bacpy(&ev.addr.bdaddr, bdaddr);
 9910	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 9911
 9912	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
 9913			  NULL);
 9914}
 9915
 9916static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9917				      u8 link_type, u8 addr_type, u8 status,
 9918				      u8 opcode)
 9919{
 9920	struct mgmt_pending_cmd *cmd;
 
 
 9921
 9922	cmd = pending_find(opcode, hdev);
 9923	if (!cmd)
 9924		return -ENOENT;
 9925
 9926	cmd->cmd_complete(cmd, mgmt_status(status));
 
 
 
 
 9927	mgmt_pending_remove(cmd);
 9928
 9929	return 0;
 9930}
 9931
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
 9938
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
 9946
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
 9953
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
 9961
 9962int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
 9963			     u8 link_type, u8 addr_type, u32 passkey,
 9964			     u8 entered)
 9965{
 9966	struct mgmt_ev_passkey_notify ev;
 9967
 9968	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
 9969
 9970	bacpy(&ev.addr.bdaddr, bdaddr);
 9971	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 9972	ev.passkey = __cpu_to_le32(passkey);
 9973	ev.entered = entered;
 9974
 9975	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
 9976}
 9977
/* Report an authentication failure on @conn.
 *
 * The Auth Failed event is broadcast (skipping the pairing issuer's
 * socket when a pairing command is pending) before that pending
 * command is completed and removed.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
 9998
/* Finish a Set Link Security operation.
 *
 * On HCI failure every pending Set Link Security command is failed with
 * the translated status. On success the HCI_LINK_SECURITY flag is
 * synced to the controller's HCI_AUTH state and, if that changed,
 * a New Settings event is emitted (skipping the issuer's socket).
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's authentication state into the flag;
	 * "changed" is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10025
10026static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10027{
10028	struct cmd_lookup *match = data;
10029
 
 
 
 
 
10030	if (match->sk == NULL) {
10031		match->sk = cmd->sk;
10032		sock_hold(match->sk);
10033	}
 
 
10034}
10035
/* Finish a class-of-device update, notifying on success.
 *
 * Finds the socket of whichever command triggered the update (Set Dev
 * Class, Add UUID or Remove UUID) and, when the HCI operation
 * succeeded, broadcasts the new 3-byte class plus extended info.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10054
/* Finish a local-name update and broadcast Local Name Changed.
 *
 * When no Set Local Name command is pending the change originated in
 * the kernel (e.g. via hci_sock), so the stored name is updated here;
 * in that case events are suppressed while powering on to avoid noise.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10082
10083static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10084{
10085	int i;
 
10086
10087	for (i = 0; i < uuid_count; i++) {
10088		if (!memcmp(uuid, uuids[i], 16))
10089			return true;
10090	}
10091
10092	return false;
 
 
 
10093}
10094
/* Walk the EIR/advertising TLV data in @eir and return true if any
 * advertised service UUID (16-, 32- or 128-bit, complete or partial
 * list) matches an entry in the @uuids filter list. Shorter UUIDs are
 * expanded to 128 bits using the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the significant data */
		if (field_len == 0)
			break;

		/* Stop on truncated fields rather than reading past the end */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs; bytes 12-13 of the
			 * base UUID carry the 16-bit value.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs in bytes 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 16-byte UUIDs, compared directly */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10149
/* Decide whether a discovery result passes the active service-discovery
 * filter (RSSI threshold and/or UUID list). Returns true if the result
 * should be reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10192
10193void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10194				  bdaddr_t *bdaddr, u8 addr_type)
10195{
10196	struct mgmt_ev_adv_monitor_device_lost ev;
10197
10198	ev.monitor_handle = cpu_to_le16(handle);
10199	bacpy(&ev.addr.bdaddr, bdaddr);
10200	ev.addr.type = addr_type;
10201
10202	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10203		   NULL);
10204}
10205
/* Re-emit a Device Found payload as an Adv Monitor Device Found event.
 *
 * @skb holds a fully built Device Found event; a new skb is allocated
 * with the matched monitor's @handle prepended, since the two event
 * formats differ only by that leading field. @skb itself is not
 * consumed here.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10235
/* Route an advertisement report to Device Found and/or Adv Monitor
 * Device Found events depending on why it was received. Consumes @skb
 * (either forwarded via mgmt_event_skb() or freed).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	/* Notify the first not-yet-notified monitor entry for this address
	 * and recompute whether any monitored device still awaits its
	 * one-shot notification.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10299
/* Emit a Mesh Device Found event for an LE advertisement.
 *
 * If a mesh AD-type filter is configured (hdev->mesh_ad_types non-empty,
 * zero-terminated), the report is only forwarded when the advertising
 * data or scan response contains at least one of the requested AD types.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An empty filter list accepts everything */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present - drop the report silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10365
10366void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10367		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10368		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10369		       u64 instant)
10370{
10371	struct sk_buff *skb;
10372	struct mgmt_ev_device_found *ev;
10373	bool report_device = hci_discovery_active(hdev);
10374
10375	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10376		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10377				  eir, eir_len, scan_rsp, scan_rsp_len,
10378				  instant);
10379
10380	/* Don't send events for a non-kernel initiated discovery. With
10381	 * LE one exception is if we have pend_le_reports > 0 in which
10382	 * case we're doing passive scanning and want these events.
10383	 */
10384	if (!hci_discovery_active(hdev)) {
10385		if (link_type == ACL_LINK)
10386			return;
10387		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10388			report_device = true;
10389		else if (!hci_is_adv_monitoring(hdev))
10390			return;
10391	}
10392
10393	if (hdev->discovery.result_filtering) {
10394		/* We are using service discovery */
10395		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10396				     scan_rsp_len))
10397			return;
10398	}
10399
10400	if (hdev->discovery.limited) {
10401		/* Check for limited discoverable bit */
10402		if (dev_class) {
10403			if (!(dev_class[1] & 0x20))
10404				return;
10405		} else {
10406			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10407			if (!flags || !(flags[0] & LE_AD_LIMITED))
10408				return;
10409		}
10410	}
10411
10412	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
10413	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10414			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
10415	if (!skb)
10416		return;
10417
10418	ev = skb_put(skb, sizeof(*ev));
10419
10420	/* In case of device discovery with BR/EDR devices (pre 1.2), the
10421	 * RSSI value was reported as 0 when not available. This behavior
10422	 * is kept when using device discovery. This is required for full
10423	 * backwards compatibility with the API.
10424	 *
10425	 * However when using service discovery, the value 127 will be
10426	 * returned when the RSSI is not available.
10427	 */
10428	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10429	    link_type == ACL_LINK)
10430		rssi = 0;
10431
10432	bacpy(&ev->addr.bdaddr, bdaddr);
10433	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10434	ev->rssi = rssi;
10435	ev->flags = cpu_to_le32(flags);
10436
10437	if (eir_len > 0)
10438		/* Copy EIR or advertising data into event */
10439		skb_put_data(skb, eir, eir_len);
10440
10441	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10442		u8 eir_cod[5];
10443
10444		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10445					   dev_class, 3);
10446		skb_put_data(skb, eir_cod, sizeof(eir_cod));
10447	}
10448
10449	if (scan_rsp_len > 0)
10450		/* Append scan response data to event */
10451		skb_put_data(skb, scan_rsp, scan_rsp_len);
10452
10453	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10454
10455	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
 
10456}
10457
10458void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10459		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10460{
10461	struct sk_buff *skb;
10462	struct mgmt_ev_device_found *ev;
10463	u16 eir_len = 0;
10464	u32 flags = 0;
10465
10466	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10467			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10468
10469	ev = skb_put(skb, sizeof(*ev));
10470	bacpy(&ev->addr.bdaddr, bdaddr);
10471	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10472	ev->rssi = rssi;
10473
10474	if (name)
10475		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10476	else
10477		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10478
10479	ev->eir_len = cpu_to_le16(eir_len);
10480	ev->flags = cpu_to_le32(flags);
 
10481
10482	mgmt_event_skb(skb, NULL);
10483}
10484
10485void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10486{
10487	struct mgmt_ev_discovering ev;
 
10488
10489	bt_dev_dbg(hdev, "discovering %u", discovering);
 
 
10490
10491	memset(&ev, 0, sizeof(ev));
10492	ev.type = hdev->discovery.type;
10493	ev.discovering = discovering;
10494
10495	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10496}
10497
10498void mgmt_suspending(struct hci_dev *hdev, u8 state)
10499{
10500	struct mgmt_ev_controller_suspend ev;
 
10501
10502	ev.suspend_state = state;
10503	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10504}
10505
10506void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10507		   u8 addr_type)
10508{
10509	struct mgmt_ev_controller_resume ev;
10510
10511	ev.wake_reason = reason;
10512	if (bdaddr) {
10513		bacpy(&ev.addr.bdaddr, bdaddr);
10514		ev.addr.type = addr_type;
10515	} else {
10516		memset(&ev.addr, 0, sizeof(ev.addr));
10517	}
10518
10519	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10520}
 
 
10521
/* Registration record for the mgmt interface on the HCI control channel;
 * mgmt_handlers dispatch the individual opcodes.
 */
10522static struct hci_mgmt_chan chan = {
10523	.channel	= HCI_CHANNEL_CONTROL,
10524	.handler_count	= ARRAY_SIZE(mgmt_handlers),
10525	.handlers	= mgmt_handlers,
10526	.hdev_init	= mgmt_init_hdev,
10527};
10528
10529int mgmt_init(void)
10530{
10531	return hci_mgmt_chan_register(&chan);
10532}
10533
10534void mgmt_exit(void)
10535{
10536	hci_mgmt_chan_unregister(&chan);
 
 
 
 
 
 
 
 
 
10537}
10538
10539void mgmt_cleanup(struct sock *sk)
10540{
10541	struct mgmt_mesh_tx *mesh_tx;
10542	struct hci_dev *hdev;
10543
10544	read_lock(&hci_dev_list_lock);
10545
10546	list_for_each_entry(hdev, &hci_dev_list, list) {
10547		do {
10548			mesh_tx = mgmt_mesh_next(hdev, sk);
10549
10550			if (mesh_tx)
10551				mesh_send_complete(hdev, mesh_tx, true);
10552		} while (mesh_tx);
10553	}
10554
10555	read_unlock(&hci_dev_list_lock);
 
10556}
v3.5.6
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3
   4   Copyright (C) 2010  Nokia Corporation
   5   Copyright (C) 2011-2012 Intel Corporation
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI Management interface */
  26
  27#include <linux/kernel.h>
  28#include <linux/uaccess.h>
  29#include <linux/module.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
 
 
  34#include <net/bluetooth/mgmt.h>
  35#include <net/bluetooth/smp.h>
  36
  37bool enable_hs;
 
 
 
 
 
 
  38
  39#define MGMT_VERSION	1
  40#define MGMT_REVISION	1
  41
  42static const u16 mgmt_commands[] = {
  43	MGMT_OP_READ_INDEX_LIST,
  44	MGMT_OP_READ_INFO,
  45	MGMT_OP_SET_POWERED,
  46	MGMT_OP_SET_DISCOVERABLE,
  47	MGMT_OP_SET_CONNECTABLE,
  48	MGMT_OP_SET_FAST_CONNECTABLE,
  49	MGMT_OP_SET_PAIRABLE,
  50	MGMT_OP_SET_LINK_SECURITY,
  51	MGMT_OP_SET_SSP,
  52	MGMT_OP_SET_HS,
  53	MGMT_OP_SET_LE,
  54	MGMT_OP_SET_DEV_CLASS,
  55	MGMT_OP_SET_LOCAL_NAME,
  56	MGMT_OP_ADD_UUID,
  57	MGMT_OP_REMOVE_UUID,
  58	MGMT_OP_LOAD_LINK_KEYS,
  59	MGMT_OP_LOAD_LONG_TERM_KEYS,
  60	MGMT_OP_DISCONNECT,
  61	MGMT_OP_GET_CONNECTIONS,
  62	MGMT_OP_PIN_CODE_REPLY,
  63	MGMT_OP_PIN_CODE_NEG_REPLY,
  64	MGMT_OP_SET_IO_CAPABILITY,
  65	MGMT_OP_PAIR_DEVICE,
  66	MGMT_OP_CANCEL_PAIR_DEVICE,
  67	MGMT_OP_UNPAIR_DEVICE,
  68	MGMT_OP_USER_CONFIRM_REPLY,
  69	MGMT_OP_USER_CONFIRM_NEG_REPLY,
  70	MGMT_OP_USER_PASSKEY_REPLY,
  71	MGMT_OP_USER_PASSKEY_NEG_REPLY,
  72	MGMT_OP_READ_LOCAL_OOB_DATA,
  73	MGMT_OP_ADD_REMOTE_OOB_DATA,
  74	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
  75	MGMT_OP_START_DISCOVERY,
  76	MGMT_OP_STOP_DISCOVERY,
  77	MGMT_OP_CONFIRM_NAME,
  78	MGMT_OP_BLOCK_DEVICE,
  79	MGMT_OP_UNBLOCK_DEVICE,
  80	MGMT_OP_SET_DEVICE_ID,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  81};
  82
  83static const u16 mgmt_events[] = {
  84	MGMT_EV_CONTROLLER_ERROR,
  85	MGMT_EV_INDEX_ADDED,
  86	MGMT_EV_INDEX_REMOVED,
  87	MGMT_EV_NEW_SETTINGS,
  88	MGMT_EV_CLASS_OF_DEV_CHANGED,
  89	MGMT_EV_LOCAL_NAME_CHANGED,
  90	MGMT_EV_NEW_LINK_KEY,
  91	MGMT_EV_NEW_LONG_TERM_KEY,
  92	MGMT_EV_DEVICE_CONNECTED,
  93	MGMT_EV_DEVICE_DISCONNECTED,
  94	MGMT_EV_CONNECT_FAILED,
  95	MGMT_EV_PIN_CODE_REQUEST,
  96	MGMT_EV_USER_CONFIRM_REQUEST,
  97	MGMT_EV_USER_PASSKEY_REQUEST,
  98	MGMT_EV_AUTH_FAILED,
  99	MGMT_EV_DEVICE_FOUND,
 100	MGMT_EV_DISCOVERING,
 101	MGMT_EV_DEVICE_BLOCKED,
 102	MGMT_EV_DEVICE_UNBLOCKED,
 103	MGMT_EV_DEVICE_UNPAIRED,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 104};
 105
 106/*
 107 * These LE scan and inquiry parameters were chosen according to LE General
 108 * Discovery Procedure specification.
 109 */
 110#define LE_SCAN_TYPE			0x01
 111#define LE_SCAN_WIN			0x12
 112#define LE_SCAN_INT			0x12
 113#define LE_SCAN_TIMEOUT_LE_ONLY		10240	/* TGAP(gen_disc_scan_min) */
 114#define LE_SCAN_TIMEOUT_BREDR_LE	5120	/* TGAP(100)/2 */
 
 
 
 115
 116#define INQUIRY_LEN_BREDR		0x08	/* TGAP(100) */
 117#define INQUIRY_LEN_BREDR_LE		0x04	/* TGAP(100)/2 */
 
 
 
 
 
 
 
 
 
 
 
 
 118
 119#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
 120
 121#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
 122				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
 123
/* One mgmt command awaiting its HCI completion; linked on
 * hdev->mgmt_pending (see mgmt_pending_add/find/remove below).
 */
  124struct pending_cmd {
  125	struct list_head list;	/* entry in hdev->mgmt_pending */
  126	u16 opcode;	/* mgmt opcode this command was issued with */
  127	int index;	/* controller id (hdev->id) at submission time */
  128	void *param;	/* copy of the request parameters (kmalloc'd) */
  129	struct sock *sk;	/* requesting socket, held via sock_hold() */
  130	void *user_data;	/* per-opcode context set by the caller */
  131};
 132
 133/* HCI to MGMT error code conversion table */
 134static u8 mgmt_status_table[] = {
 135	MGMT_STATUS_SUCCESS,
 136	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
 137	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
 138	MGMT_STATUS_FAILED,		/* Hardware Failure */
 139	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
 140	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
 141	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
 142	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
 143	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
 144	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
 145	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
 146	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
 147	MGMT_STATUS_BUSY,		/* Command Disallowed */
 148	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
 149	MGMT_STATUS_REJECTED,		/* Rejected Security */
 150	MGMT_STATUS_REJECTED,		/* Rejected Personal */
 151	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
 152	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
 153	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
 154	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
 155	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
 156	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
 157	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
 158	MGMT_STATUS_BUSY,		/* Repeated Attempts */
 159	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
 160	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
 161	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
 162	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
 163	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
 164	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
 165	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
 166	MGMT_STATUS_FAILED,		/* Unspecified Error */
 167	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
 168	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
 169	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
 170	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
 171	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
 172	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
 173	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
 174	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
 175	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
 176	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
 177	MGMT_STATUS_FAILED,		/* Transaction Collision */
 
 178	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
 179	MGMT_STATUS_REJECTED,		/* QoS Rejected */
 180	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
 181	MGMT_STATUS_REJECTED,		/* Insufficient Security */
 182	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
 
 183	MGMT_STATUS_BUSY,		/* Role Switch Pending */
 
 184	MGMT_STATUS_FAILED,		/* Slot Violation */
 185	MGMT_STATUS_FAILED,		/* Role Switch Failed */
 186	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
 187	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
 188	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
 189	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
 190	MGMT_STATUS_BUSY,		/* Controller Busy */
 191	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
 192	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
 193	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
 194	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
 195	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
 196};
 197
 198static u8 mgmt_status(u8 hci_status)
 199{
 200	if (hci_status < ARRAY_SIZE(mgmt_status_table))
 201		return mgmt_status_table[hci_status];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 202
 203	return MGMT_STATUS_FAILED;
 204}
 205
 206static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 207{
 208	struct sk_buff *skb;
 209	struct mgmt_hdr *hdr;
 210	struct mgmt_ev_cmd_status *ev;
 211	int err;
 212
 213	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
 
 214
 215	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
 216	if (!skb)
 217		return -ENOMEM;
 218
 219	hdr = (void *) skb_put(skb, sizeof(*hdr));
 
 
 
 
 
 220
 221	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
 222	hdr->index = cpu_to_le16(index);
 223	hdr->len = cpu_to_le16(sizeof(*ev));
 
 
 
 224
 225	ev = (void *) skb_put(skb, sizeof(*ev));
 226	ev->status = status;
 227	ev->opcode = cpu_to_le16(cmd);
 
 
 
 228
 229	err = sock_queue_rcv_skb(sk, skb);
 230	if (err < 0)
 231		kfree_skb(skb);
 
 
 232
 233	return err;
 
 
 
 
 
 234}
 235
 236static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
 237			void *rp, size_t rp_len)
 238{
 239	struct sk_buff *skb;
 240	struct mgmt_hdr *hdr;
 241	struct mgmt_ev_cmd_complete *ev;
 242	int err;
 243
 244	BT_DBG("sock %p", sk);
 245
 246	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
 247	if (!skb)
 248		return -ENOMEM;
 249
 250	hdr = (void *) skb_put(skb, sizeof(*hdr));
 251
 252	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
 253	hdr->index = cpu_to_le16(index);
 254	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
 255
 256	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
 257	ev->opcode = cpu_to_le16(cmd);
 258	ev->status = status;
 259
 260	if (rp)
 261		memcpy(ev->data, rp, rp_len);
 262
 263	err = sock_queue_rcv_skb(sk, skb);
 264	if (err < 0)
 265		kfree_skb(skb);
 266
 267	return err;
 268}
 269
 270static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
 271			u16 data_len)
 272{
 273	struct mgmt_rp_read_version rp;
 274
 275	BT_DBG("sock %p", sk);
 276
 277	rp.version = MGMT_VERSION;
 278	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
 279
 280	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
 281			    sizeof(rp));
 282}
 283
 284static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
 285			 u16 data_len)
 286{
 287	struct mgmt_rp_read_commands *rp;
 288	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
 289	const u16 num_events = ARRAY_SIZE(mgmt_events);
 290	__le16 *opcode;
 291	size_t rp_size;
 292	int i, err;
 293
 294	BT_DBG("sock %p", sk);
 
 
 
 
 
 
 
 
 295
 296	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
 297
 298	rp = kmalloc(rp_size, GFP_KERNEL);
 299	if (!rp)
 300		return -ENOMEM;
 301
 302	rp->num_commands = __constant_cpu_to_le16(num_commands);
 303	rp->num_events = __constant_cpu_to_le16(num_events);
 
 
 
 304
 305	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
 306		put_unaligned_le16(mgmt_commands[i], opcode);
 
 
 
 
 
 307
 308	for (i = 0; i < num_events; i++, opcode++)
 309		put_unaligned_le16(mgmt_events[i], opcode);
 310
 311	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
 312			   rp_size);
 
 
 
 
 313	kfree(rp);
 314
 315	return err;
 316}
 317
 318static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 319			   u16 data_len)
 320{
 321	struct mgmt_rp_read_index_list *rp;
 322	struct list_head *p;
 323	struct hci_dev *d;
 324	size_t rp_len;
 325	u16 count;
 326	int i, err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 327
 328	BT_DBG("sock %p", sk);
 329
 330	read_lock(&hci_dev_list_lock);
 331
 332	count = 0;
 333	list_for_each(p, &hci_dev_list) {
 334		count++;
 
 
 335	}
 336
 337	rp_len = sizeof(*rp) + (2 * count);
 338	rp = kmalloc(rp_len, GFP_ATOMIC);
 339	if (!rp) {
 340		read_unlock(&hci_dev_list_lock);
 341		return -ENOMEM;
 342	}
 343
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 344	rp->num_controllers = cpu_to_le16(count);
 
 
 
 
 
 
 345
 346	i = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347	list_for_each_entry(d, &hci_dev_list, list) {
 348		if (test_bit(HCI_SETUP, &d->dev_flags))
 
 
 349			continue;
 350
 351		rp->index[i++] = cpu_to_le16(d->id);
 352		BT_DBG("Added hci%u", d->id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 353	}
 354
 
 
 355	read_unlock(&hci_dev_list_lock);
 356
 357	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
 358			   rp_len);
 
 
 
 
 
 
 
 
 
 359
 360	kfree(rp);
 361
 362	return err;
 363}
 364
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 365static u32 get_supported_settings(struct hci_dev *hdev)
 366{
 367	u32 settings = 0;
 368
 369	settings |= MGMT_SETTING_POWERED;
 
 
 370	settings |= MGMT_SETTING_CONNECTABLE;
 371	settings |= MGMT_SETTING_FAST_CONNECTABLE;
 372	settings |= MGMT_SETTING_DISCOVERABLE;
 373	settings |= MGMT_SETTING_PAIRABLE;
 374
 375	if (hdev->features[6] & LMP_SIMPLE_PAIR)
 376		settings |= MGMT_SETTING_SSP;
 377
 378	if (!(hdev->features[4] & LMP_NO_BREDR)) {
 379		settings |= MGMT_SETTING_BREDR;
 380		settings |= MGMT_SETTING_LINK_SECURITY;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 381	}
 382
 383	if (enable_hs)
 384		settings |= MGMT_SETTING_HS;
 
 
 
 
 
 
 
 385
 386	if (hdev->features[4] & LMP_LE)
 387		settings |= MGMT_SETTING_LE;
 388
 389	return settings;
 390}
 391
 392static u32 get_current_settings(struct hci_dev *hdev)
 393{
 394	u32 settings = 0;
 395
 396	if (hdev_is_powered(hdev))
 397		settings |= MGMT_SETTING_POWERED;
 398
 399	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
 400		settings |= MGMT_SETTING_CONNECTABLE;
 401
 402	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
 
 
 
 403		settings |= MGMT_SETTING_DISCOVERABLE;
 404
 405	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
 406		settings |= MGMT_SETTING_PAIRABLE;
 407
 408	if (!(hdev->features[4] & LMP_NO_BREDR))
 409		settings |= MGMT_SETTING_BREDR;
 410
 411	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
 412		settings |= MGMT_SETTING_LE;
 413
 414	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
 415		settings |= MGMT_SETTING_LINK_SECURITY;
 416
 417	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
 418		settings |= MGMT_SETTING_SSP;
 419
 420	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
 421		settings |= MGMT_SETTING_HS;
 422
 423	return settings;
 424}
 425
 426#define PNP_INFO_SVCLASS_ID		0x1200
 
 427
/* Bluetooth Base UUID in little-endian byte order; get_uuid16() matches
 * incoming 128-bit UUIDs against these low twelve bytes.
 */
  428static u8 bluetooth_base_uuid[] = {
  429			0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
  430			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  431};
 432
 433static u16 get_uuid16(u8 *uuid128)
 434{
 435	u32 val;
 436	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 437
 438	for (i = 0; i < 12; i++) {
 439		if (bluetooth_base_uuid[i] != uuid128[i])
 440			return 0;
 441	}
 442
 443	val = get_unaligned_le32(&uuid128[12]);
 444	if (val > 0xffff)
 445		return 0;
 446
 447	return (u16) val;
 448}
 449
 450static void create_eir(struct hci_dev *hdev, u8 *data)
 451{
 452	u8 *ptr = data;
 453	u16 eir_len = 0;
 454	u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
 455	int i, truncated = 0;
 456	struct bt_uuid *uuid;
 457	size_t name_len;
 458
 459	name_len = strlen(hdev->dev_name);
 
 460
 461	if (name_len > 0) {
 462		/* EIR Data type */
 463		if (name_len > 48) {
 464			name_len = 48;
 465			ptr[1] = EIR_NAME_SHORT;
 466		} else
 467			ptr[1] = EIR_NAME_COMPLETE;
 468
 469		/* EIR Data length */
 470		ptr[0] = name_len + 1;
 
 
 471
 472		memcpy(ptr + 2, hdev->dev_name, name_len);
 
 
 473
 474		eir_len += (name_len + 2);
 475		ptr += (name_len + 2);
 
 
 
 
 
 
 
 
 
 
 
 
 
 476	}
 477
 478	if (hdev->inq_tx_power) {
 479		ptr[0] = 2;
 480		ptr[1] = EIR_TX_POWER;
 481		ptr[2] = (u8) hdev->inq_tx_power;
 482
 483		eir_len += 3;
 484		ptr += 3;
 485	}
 486
 487	if (hdev->devid_source > 0) {
 488		ptr[0] = 9;
 489		ptr[1] = EIR_DEVICE_ID;
 490
 491		put_unaligned_le16(hdev->devid_source, ptr + 2);
 492		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
 493		put_unaligned_le16(hdev->devid_product, ptr + 6);
 494		put_unaligned_le16(hdev->devid_version, ptr + 8);
 495
 496		eir_len += 10;
 497		ptr += 10;
 498	}
 499
 500	memset(uuid16_list, 0, sizeof(uuid16_list));
 
 501
 502	/* Group all UUID16 types */
 503	list_for_each_entry(uuid, &hdev->uuids, list) {
 504		u16 uuid16;
 
 505
 506		uuid16 = get_uuid16(uuid->uuid);
 507		if (uuid16 == 0)
 508			return;
 509
 510		if (uuid16 < 0x1100)
 511			continue;
 
 
 512
 513		if (uuid16 == PNP_INFO_SVCLASS_ID)
 514			continue;
 515
 516		/* Stop if not enough space to put next UUID */
 517		if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
 518			truncated = 1;
 519			break;
 520		}
 521
 522		/* Check for duplicates */
 523		for (i = 0; uuid16_list[i] != 0; i++)
 524			if (uuid16_list[i] == uuid16)
 525				break;
 
 
 
 
 
 
 
 526
 527		if (uuid16_list[i] == 0) {
 528			uuid16_list[i] = uuid16;
 529			eir_len += sizeof(u16);
 530		}
 531	}
 532
 533	if (uuid16_list[0] != 0) {
 534		u8 *length = ptr;
 535
 536		/* EIR Data type */
 537		ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
 538
 539		ptr += 2;
 540		eir_len += 2;
 541
 542		for (i = 0; uuid16_list[i] != 0; i++) {
 543			*ptr++ = (uuid16_list[i] & 0x00ff);
 544			*ptr++ = (uuid16_list[i] & 0xff00) >> 8;
 545		}
 546
 547		/* EIR Data length */
 548		*length = (i * sizeof(u16)) + 1;
 549	}
 550}
 551
 552static int update_eir(struct hci_dev *hdev)
 553{
 554	struct hci_cp_write_eir cp;
 
 555
 556	if (!hdev_is_powered(hdev))
 557		return 0;
 558
 559	if (!(hdev->features[6] & LMP_EXT_INQ))
 560		return 0;
 561
 562	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
 563		return 0;
 
 
 
 
 
 
 564
 565	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
 566		return 0;
 567
 568	memset(&cp, 0, sizeof(cp));
 569
 570	create_eir(hdev, cp.data);
 571
 572	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
 573		return 0;
 574
 575	memcpy(hdev->eir, cp.data, sizeof(cp.data));
 576
 577	return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 578}
 579
 580static u8 get_service_classes(struct hci_dev *hdev)
 
 581{
 582	struct bt_uuid *uuid;
 583	u8 val = 0;
 584
 585	list_for_each_entry(uuid, &hdev->uuids, list)
 586		val |= uuid->svc_hint;
 
 587
 588	return val;
 589}
 590
 591static int update_class(struct hci_dev *hdev)
 592{
 593	u8 cod[3];
 594	int err;
 595
 596	BT_DBG("%s", hdev->name);
 
 
 597
 598	if (!hdev_is_powered(hdev))
 599		return 0;
 600
 601	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
 602		return 0;
 603
 604	cod[0] = hdev->minor_class;
 605	cod[1] = hdev->major_class;
 606	cod[2] = get_service_classes(hdev);
 
 
 607
 608	if (memcmp(cod, hdev->dev_class, 3) == 0)
 609		return 0;
 610
 611	err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
 612	if (err == 0)
 613		set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
 614
 615	return err;
 
 
 
 616}
 617
 618static void service_cache_off(struct work_struct *work)
 619{
 620	struct hci_dev *hdev = container_of(work, struct hci_dev,
 621					    service_cache.work);
 622
 623	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
 624		return;
 625
 626	hci_dev_lock(hdev);
 627
 628	update_eir(hdev);
 629	update_class(hdev);
 630
 631	hci_dev_unlock(hdev);
 632}
 633
 634static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
 635{
 636	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
 637		return;
 638
 
 
 
 639	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
 
 
 640
 641	/* Non-mgmt controlled devices get this bit set
 642	 * implicitly so that pairing works for them, however
 643	 * for mgmt we require user-space to explicitly enable
 644	 * it
 645	 */
 646	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
 
 
 647}
 648
 649static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
 650				void *data, u16 data_len)
 651{
 652	struct mgmt_rp_read_info rp;
 653
 654	BT_DBG("sock %p %s", sk, hdev->name);
 655
 656	hci_dev_lock(hdev);
 657
 658	memset(&rp, 0, sizeof(rp));
 659
 660	bacpy(&rp.bdaddr, &hdev->bdaddr);
 661
 662	rp.version = hdev->hci_ver;
 663	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
 664
 665	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
 666	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
 667
 668	memcpy(rp.dev_class, hdev->dev_class, 3);
 669
 670	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
 671	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
 672
 673	hci_dev_unlock(hdev);
 674
 675	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
 676			    sizeof(rp));
 677}
 678
 679static void mgmt_pending_free(struct pending_cmd *cmd)
 680{
 681	sock_put(cmd->sk);
 682	kfree(cmd->param);
 683	kfree(cmd);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 684}
 685
 686static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
 687					    struct hci_dev *hdev, void *data,
 688					    u16 len)
 689{
 690	struct pending_cmd *cmd;
 
 
 
 
 
 
 691
 692	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
 693	if (!cmd)
 694		return NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 695
 696	cmd->opcode = opcode;
 697	cmd->index = hdev->id;
 
 698
 699	cmd->param = kmalloc(len, GFP_ATOMIC);
 700	if (!cmd->param) {
 701		kfree(cmd);
 702		return NULL;
 703	}
 704
 705	if (data)
 706		memcpy(cmd->param, data, len);
 707
 708	cmd->sk = sk;
 709	sock_hold(sk);
 710
 711	list_add(&cmd->list, &hdev->mgmt_pending);
 
 
 
 
 
 
 
 712
 713	return cmd;
 
 714}
 715
 716static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
 717				 void (*cb)(struct pending_cmd *cmd, void *data),
 718				 void *data)
 719{
 720	struct list_head *p, *n;
 
 
 721
 722	list_for_each_safe(p, n, &hdev->mgmt_pending) {
 723		struct pending_cmd *cmd;
 724
 725		cmd = list_entry(p, struct pending_cmd, list);
 
 
 
 726
 727		if (opcode > 0 && cmd->opcode != opcode)
 728			continue;
 
 
 729
 730		cb(cmd, data);
 
 
 
 
 731	}
 732}
 733
 734static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
 
 735{
 736	struct pending_cmd *cmd;
 
 
 
 
 
 
 737
 738	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
 739		if (cmd->opcode == opcode)
 740			return cmd;
 
 
 
 
 
 
 
 
 741	}
 
 
 
 
 
 742
 743	return NULL;
 
 744}
 745
 746static void mgmt_pending_remove(struct pending_cmd *cmd)
 747{
 748	list_del(&cmd->list);
 749	mgmt_pending_free(cmd);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 750}
 751
 752static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
 753{
 754	__le32 settings = cpu_to_le32(get_current_settings(hdev));
 
 
 
 755
 756	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
 757			    sizeof(settings));
 758}
 759
 760static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
 761		       u16 len)
 762{
 763	struct mgmt_mode *cp = data;
 764	struct pending_cmd *cmd;
 765	int err;
 766
 767	BT_DBG("request for %s", hdev->name);
 
 
 
 
 768
 769	hci_dev_lock(hdev);
 770
 771	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
 772		cancel_delayed_work(&hdev->power_off);
 773
 774		if (cp->val) {
 775			err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
 776			mgmt_powered(hdev, 1);
 777			goto failed;
 778		}
 779	}
 780
 781	if (!!cp->val == hdev_is_powered(hdev)) {
 782		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
 783		goto failed;
 784	}
 785
 786	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
 787		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
 788				 MGMT_STATUS_BUSY);
 789		goto failed;
 790	}
 791
 792	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
 793	if (!cmd) {
 794		err = -ENOMEM;
 795		goto failed;
 796	}
 797
 798	if (cp->val)
 799		schedule_work(&hdev->power_on);
 800	else
 801		schedule_work(&hdev->power_off.work);
 
 
 
 
 
 
 802
 803	err = 0;
 
 804
 805failed:
 806	hci_dev_unlock(hdev);
 807	return err;
 808}
 809
 810static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
 811		      struct sock *skip_sk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 812{
 813	struct sk_buff *skb;
 814	struct mgmt_hdr *hdr;
 
 815
 816	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
 817	if (!skb)
 818		return -ENOMEM;
 
 
 
 
 
 
 819
 820	hdr = (void *) skb_put(skb, sizeof(*hdr));
 821	hdr->opcode = cpu_to_le16(event);
 822	if (hdev)
 823		hdr->index = cpu_to_le16(hdev->id);
 
 
 824	else
 825		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
 826	hdr->len = cpu_to_le16(data_len);
 
 
 
 
 
 
 
 
 
 
 
 827
 828	if (data)
 829		memcpy(skb_put(skb, data_len), data, data_len);
 830
 831	/* Time stamp */
 832	__net_timestamp(skb);
 
 
 
 
 833
 834	hci_send_to_control(skb, skip_sk);
 835	kfree_skb(skb);
 
 
 
 836
 837	return 0;
 
 
 
 
 
 838}
 839
 840static int new_settings(struct hci_dev *hdev, struct sock *skip)
 841{
 842	__le32 ev;
 843
 844	ev = cpu_to_le32(get_current_settings(hdev));
 845
 846	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
 847}
 848
/* Handle the MGMT Set Discoverable command: toggle inquiry scan and
 * arm/disarm the discoverable timeout.
 *
 * When the controller is powered off, only the HCI_DISCOVERABLE flag is
 * updated; the HCI scan-enable command is only sent on a powered device.
 * Responds with a settings reply on success, or an mgmt status on error.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* A timeout only makes sense when enabling discoverability */
	timeout = __le16_to_cpu(cp->timeout);
	if (!cp->val && timeout > 0)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* The timeout worker needs a powered controller to act on */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one scan-enable changing command may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable (inquiry scan) requires connectable (page scan) */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: just record the flag and report the new settings */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Already in the requested state: only (re)arm or cancel the
	 * discoverable timeout, no HCI command needed.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
		if (hdev->discov_timeout > 0) {
			cancel_delayed_work(&hdev->discov_off);
			hdev->discov_timeout = 0;
		}

		if (cp->val && timeout > 0) {
			hdev->discov_timeout = timeout;
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
				msecs_to_jiffies(hdev->discov_timeout * 1000));
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Page scan stays on; inquiry scan is added when enabling */
	scan = SCAN_PAGE;

	if (cp->val)
		scan |= SCAN_INQUIRY;
	else
		cancel_delayed_work(&hdev->discov_off);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	if (err < 0)
		mgmt_pending_remove(cmd);

	/* Timeout is armed by the command-complete handler */
	if (cp->val)
		hdev->discov_timeout = timeout;

failed:
	hci_dev_unlock(hdev);
	return err;
}
 944
/* Handle the MGMT Set Connectable command: toggle BR/EDR page scan.
 *
 * Powered off: only the HCI_CONNECTABLE flag is updated. Disabling
 * connectable also clears discoverable, since inquiry scan without
 * page scan is not useful.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;

		if (cp->val) {
			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		} else {
			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one scan-enable changing command may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already matches the current page scan state */
	if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		scan = SCAN_PAGE;
	} else {
		scan = 0;

		/* Turning off all scanning also invalidates a pending
		 * discoverable timeout.
		 */
		if (test_bit(HCI_ISCAN, &hdev->flags) &&
						hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1016
1017static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1018			u16 len)
1019{
1020	struct mgmt_mode *cp = data;
 
1021	int err;
1022
1023	BT_DBG("request for %s", hdev->name);
 
 
 
 
1024
1025	hci_dev_lock(hdev);
1026
1027	if (cp->val)
1028		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1029	else
1030		clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1031
1032	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1033	if (err < 0)
1034		goto failed;
 
 
 
 
 
 
1035
1036	err = new_settings(hdev, sk);
 
1037
1038failed:
1039	hci_dev_unlock(hdev);
1040	return err;
1041}
1042
/* Handle the MGMT Set Link Security command (legacy authentication).
 *
 * Powered off: only the HCI_LINK_SECURITY flag is toggled. Powered on:
 * the setting is programmed via HCI Write Auth Enable and completion is
 * reported from the command-complete handler via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
							&hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested authentication state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Handle the MGMT Set SSP (Secure Simple Pairing) command.
 *
 * Rejected if the controller does not advertise Simple Pairing support
 * in its LMP features. Powered off: only the flag is toggled; powered
 * on: HCI Write Simple Pairing Mode is issued and completion reported
 * asynchronously via the pending command.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Feature bit from LMP features page 0, octet 6 */
	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto failed;
	}

	val = !!cp->val;

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: nothing to program */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1168
1169static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1170{
1171	struct mgmt_mode *cp = data;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1172
1173	BT_DBG("request for %s", hdev->name);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1174
1175	if (!enable_hs)
1176		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1177				  MGMT_STATUS_NOT_SUPPORTED);
1178
1179	if (cp->val)
1180		set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
 
 
1181	else
1182		clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1183
1184	return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1185}
1186
/* Handle the MGMT Set Low Energy command.
 *
 * Rejected if the controller lacks LE support in its LMP features. If
 * the device is powered down, or the controller-side host feature bit
 * already matches, only HCI_LE_ENABLED is toggled; otherwise HCI Write
 * LE Host Supported is issued and completion reported asynchronously.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Feature bit from LMP features page 0, octet 4 */
	if (!(hdev->features[4] & LMP_LE)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	val = !!cp->val;
	enabled = !!(hdev->host_features[0] & LMP_HOST_LE);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Disabling leaves both le and simul zeroed */
	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
			   &hci_cp);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1254
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1255static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1256{
1257	struct mgmt_cp_add_uuid *cp = data;
1258	struct pending_cmd *cmd;
1259	struct bt_uuid *uuid;
1260	int err;
1261
1262	BT_DBG("request for %s", hdev->name);
1263
1264	hci_dev_lock(hdev);
1265
1266	if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1267		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1268				 MGMT_STATUS_BUSY);
1269		goto failed;
1270	}
1271
1272	uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
1273	if (!uuid) {
1274		err = -ENOMEM;
1275		goto failed;
1276	}
1277
1278	memcpy(uuid->uuid, cp->uuid, 16);
1279	uuid->svc_hint = cp->svc_hint;
 
1280
1281	list_add(&uuid->list, &hdev->uuids);
1282
1283	err = update_class(hdev);
1284	if (err < 0)
 
1285		goto failed;
 
1286
1287	err = update_eir(hdev);
1288	if (err < 0)
1289		goto failed;
1290
1291	if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1292		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1293				   hdev->dev_class, 3);
1294		goto failed;
1295	}
1296
1297	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1298	if (!cmd)
1299		err = -ENOMEM;
1300
1301failed:
1302	hci_dev_unlock(hdev);
1303	return err;
1304}
1305
1306static bool enable_service_cache(struct hci_dev *hdev)
1307{
1308	if (!hdev_is_powered(hdev))
1309		return false;
1310
1311	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1312		schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
 
1313		return true;
1314	}
1315
1316	return false;
1317}
1318
 
 
 
 
 
 
 
 
 
 
 
/* Handle the MGMT Remove UUID command.
 *
 * An all-zero UUID clears the whole list (and may just re-arm the
 * service cache instead of touching the controller). Otherwise every
 * matching entry is unlinked and the class/EIR data are refreshed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct list_head *p, *n;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: drop every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		/* If the cache got armed, the update is deferred and we
		 * can reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant: entries are deleted while walking the list */
	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *match = list_entry(p, struct bt_uuid, list);

		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	err = update_class(hdev);
	if (err < 0)
		goto unlock;

	err = update_eir(hdev);
	if (err < 0)
		goto unlock;

	/* No class update in flight: reply immediately */
	if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1391
/* Handle the MGMT Set Device Class command.
 *
 * Updates the major/minor class. When powered off the reply is sent
 * immediately and the class takes effect at next power on; when powered
 * the class register is rewritten (flushing the service cache first).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Drop the lock around the synchronous cancel: the cache worker
	 * itself takes hdev->lock, so holding it here would deadlock.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(hdev);
	}

	err = update_class(hdev);
	if (err < 0)
		goto unlock;

	/* No class update was kicked off: reply immediately */
	if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1443
1444static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1445								u16 len)
1446{
1447	struct mgmt_cp_load_link_keys *cp = data;
 
 
1448	u16 key_count, expected_len;
 
1449	int i;
1450
 
 
 
 
 
 
1451	key_count = __le16_to_cpu(cp->key_count);
 
 
 
 
 
 
1452
1453	expected_len = sizeof(*cp) + key_count *
1454					sizeof(struct mgmt_link_key_info);
1455	if (expected_len != len) {
1456		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1457							len, expected_len);
1458		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1459				  MGMT_STATUS_INVALID_PARAMS);
1460	}
1461
1462	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1463								key_count);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1464
1465	hci_dev_lock(hdev);
1466
1467	hci_link_keys_clear(hdev);
1468
1469	set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1470
1471	if (cp->debug_keys)
1472		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1473	else
1474		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
 
 
 
 
1475
1476	for (i = 0; i < key_count; i++) {
1477		struct mgmt_link_key_info *key = &cp->keys[i];
1478
1479		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1480				 key->type, key->pin_len);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1481	}
1482
1483	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1484
1485	hci_dev_unlock(hdev);
1486
1487	return 0;
1488}
1489
1490static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1491			   u8 addr_type, struct sock *skip_sk)
1492{
1493	struct mgmt_ev_device_unpaired ev;
1494
1495	bacpy(&ev.addr.bdaddr, bdaddr);
1496	ev.addr.type = addr_type;
1497
1498	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1499			  skip_sk);
1500}
1501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Handle the MGMT Unpair Device command.
 *
 * Removes the stored link key (BR/EDR) or LTK (LE) for the address and,
 * when requested and a connection exists, disconnects it. The reply is
 * deferred via a pending command when a disconnect is issued.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	/* Reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	else
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);

	/* Negative return means no key was stored for this address */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
							&cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
							&cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* Nothing to disconnect: complete now and notify listeners */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1570
/* Handle the MGMT Disconnect command.
 *
 * Looks up the ACL or LE connection for the address and issues an HCI
 * Disconnect; the mgmt reply is sent from the disconnect-complete path
 * via the queued pending command.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED have no baseband link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1624
1625static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1626{
1627	switch (link_type) {
1628	case LE_LINK:
1629		switch (addr_type) {
1630		case ADDR_LE_DEV_PUBLIC:
1631			return BDADDR_LE_PUBLIC;
1632
1633		default:
1634			/* Fallback to LE Random address type */
1635			return BDADDR_LE_RANDOM;
1636		}
1637
1638	default:
1639		/* Fallback to BR/EDR type */
1640		return BDADDR_BREDR;
1641	}
1642}
1643
/* Handle the MGMT Get Connections command.
 *
 * Returns the addresses of all mgmt-visible connections. SCO/eSCO
 * links are filtered out of the reply; the buffer is sized from a
 * first (unfiltered) pass and the length recomputed after filtering.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidates to size the reply buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill entries, skipping SCO/eSCO links (their
	 * slot is simply overwritten by the next accepted entry).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1701
1702static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1703				   struct mgmt_cp_pin_code_neg_reply *cp)
1704{
1705	struct pending_cmd *cmd;
1706	int err;
1707
1708	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1709			       sizeof(*cp));
1710	if (!cmd)
1711		return -ENOMEM;
1712
 
 
1713	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1714			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1715	if (err < 0)
1716		mgmt_pending_remove(cmd);
1717
1718	return err;
1719}
1720
/* Handle the MGMT PIN Code Reply command.
 *
 * Forwards the user-supplied PIN to the controller. If the connection
 * demands high security, a 16-byte PIN is mandatory; shorter PINs are
 * converted into a negative reply and the command fails.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-digit PIN; reject shorter
	 * ones by sending a negative reply on the user's behalf.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1780
1781static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1782			      void *data, u16 len)
1783{
1784	struct mgmt_cp_pin_code_neg_reply *cp = data;
1785	int err;
1786
1787	BT_DBG("");
1788
1789	hci_dev_lock(hdev);
1790
1791	if (!hdev_is_powered(hdev)) {
1792		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
1793				 MGMT_STATUS_NOT_POWERED);
1794		goto failed;
1795	}
1796
1797	err = send_pin_code_neg_reply(sk, hdev, cp);
1798
1799failed:
1800	hci_dev_unlock(hdev);
1801	return err;
1802}
1803
1804static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1805			     u16 len)
1806{
1807	struct mgmt_cp_set_io_capability *cp = data;
1808
1809	BT_DBG("");
 
 
 
 
1810
1811	hci_dev_lock(hdev);
1812
1813	hdev->io_capability = cp->io_capability;
1814
1815	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1816							hdev->io_capability);
1817
1818	hci_dev_unlock(hdev);
1819
1820	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
1821			    0);
1822}
1823
1824static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
1825{
1826	struct hci_dev *hdev = conn->hdev;
1827	struct pending_cmd *cmd;
1828
1829	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1830		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1831			continue;
1832
1833		if (cmd->user_data != conn)
1834			continue;
1835
1836		return cmd;
1837	}
1838
1839	return NULL;
1840}
1841
/* Finish a pending Pair Device command with the given mgmt status.
 *
 * Sends the reply, detaches all connection callbacks so no further
 * notifications arrive for this pairing, drops the connection
 * reference taken when pairing started, and frees the pending entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
1862
1863static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1864{
1865	struct pending_cmd *cmd;
1866
1867	BT_DBG("status %u", status);
1868
1869	cmd = find_pairing(conn);
1870	if (!cmd)
1871		BT_DBG("Unable to find a pending command");
1872	else
1873		pairing_complete(cmd, mgmt_status(status));
 
 
 
1874}
1875
1876static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
1877{
1878	struct pending_cmd *cmd;
1879
1880	BT_DBG("status %u", status);
1881
1882	if (!status)
1883		return;
1884
1885	cmd = find_pairing(conn);
1886	if (!cmd)
1887		BT_DBG("Unable to find a pending command");
1888	else
1889		pairing_complete(cmd, mgmt_status(status));
 
 
 
1890}
1891
/* Handle the MGMT Pair Device command.
 *
 * Initiates an ACL or LE connection with dedicated bonding and hooks
 * the connection callbacks so that pairing_complete() delivers the
 * final reply asynchronously. io_cap 0x03 (NoInputNoOutput) downgrades
 * the bonding requirement to non-MITM.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	/* Reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (IS_ERR(conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_CONNECT_FAILED, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A callback already installed means another pairing owns the
	 * connection; drop the reference hci_connect() took.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_put(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_put(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately */
	if (conn->state == BT_CONNECTED &&
				hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1971
/* Handle the MGMT Cancel Pair Device command.
 *
 * Aborts the single outstanding Pair Device command, provided its
 * target address matches, completing it with a Cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Resolves the pending Pair Device reply and frees cmd */
	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2013
/* Common handler for the user confirmation / passkey (negative) reply
 * commands.
 *
 * LE addresses are routed to SMP and answered synchronously; BR/EDR
 * replies are forwarded to the controller via @hci_op with the reply
 * deferred through a pending command. @passkey is only used when
 * @hci_op is HCI User Passkey Reply.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, mgmt_op,
				 MGMT_STATUS_NOT_POWERED);
		goto done;
	}

	if (type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);

	if (!conn) {
		err = cmd_status(sk, hdev->id, mgmt_op,
				 MGMT_STATUS_NOT_CONNECTED);
		goto done;
	}

	if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_status(sk, hdev->id, mgmt_op,
					 MGMT_STATUS_SUCCESS);
		else
			err = cmd_status(sk, hdev->id, mgmt_op,
					 MGMT_STATUS_FAILED);

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2078
 
 
 
 
 
 
 
 
 
 
 
 
/* MGMT User Confirm Reply: positive response to a user confirmation
 * request. Delegates to user_pairing_resp() with the matching HCI
 * opcode and no passkey payload.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* Redundant with the fixed-size check done by mgmt_control(),
	 * kept as a defensive measure. */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
2094
/* MGMT User Confirm Negative Reply: reject a user confirmation request.
 * Length validation is handled by the dispatch table in mgmt_control().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
2106
/* MGMT User Passkey Reply: supply the passkey entered by the user.
 * The only one of the four reply handlers that forwards a passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
2118
/* MGMT User Passkey Negative Reply: reject a passkey entry request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
2130
/* Send HCI Write Local Name carrying @name to the controller.
 *
 * NOTE(review): the memcpy unconditionally reads sizeof(cp.name) bytes
 * from @name, so callers must pass a buffer at least that large. The
 * visible callers pass hdev->dev_name or a mgmt command payload —
 * presumably both are sized accordingly; confirm if adding new callers.
 */
static int update_name(struct hci_dev *hdev, const char *name)
{
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, name, sizeof(cp.name));

	return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
2139
2140static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2141			  u16 len)
2142{
2143	struct mgmt_cp_set_local_name *cp = data;
2144	struct pending_cmd *cmd;
2145	int err;
2146
2147	BT_DBG("");
2148
2149	hci_dev_lock(hdev);
2150
 
 
 
 
 
 
 
 
 
 
 
2151	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2152
2153	if (!hdev_is_powered(hdev)) {
2154		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2155
2156		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2157				   data, len);
2158		if (err < 0)
2159			goto failed;
2160
2161		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2162				 sk);
 
2163
2164		goto failed;
2165	}
2166
2167	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2168	if (!cmd) {
2169		err = -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
2170		goto failed;
2171	}
2172
2173	err = update_name(hdev, cp->name);
2174	if (err < 0)
2175		mgmt_pending_remove(cmd);
2176
2177failed:
2178	hci_dev_unlock(hdev);
2179	return err;
2180}
2181
2182static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2183			       void *data, u16 data_len)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2184{
2185	struct pending_cmd *cmd;
 
 
 
 
2186	int err;
2187
2188	BT_DBG("%s", hdev->name);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2189
2190	hci_dev_lock(hdev);
2191
2192	if (!hdev_is_powered(hdev)) {
2193		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2194				 MGMT_STATUS_NOT_POWERED);
 
2195		goto unlock;
2196	}
2197
2198	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
2199		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2200				 MGMT_STATUS_NOT_SUPPORTED);
 
2201		goto unlock;
2202	}
2203
2204	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2205		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2206				 MGMT_STATUS_BUSY);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2207		goto unlock;
2208	}
2209
2210	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2211	if (!cmd) {
 
2212		err = -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2213		goto unlock;
2214	}
2215
2216	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
 
 
 
 
 
 
 
2217	if (err < 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2218		mgmt_pending_remove(cmd);
2219
 
 
 
 
 
 
 
 
 
 
 
 
2220unlock:
2221	hci_dev_unlock(hdev);
2222	return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2223}
2224
2225static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2226			       void *data, u16 len)
2227{
2228	struct mgmt_cp_add_remote_oob_data *cp = data;
2229	u8 status;
2230	int err;
2231
2232	BT_DBG("%s ", hdev->name);
2233
2234	hci_dev_lock(hdev);
2235
2236	if (!hdev_is_powered(hdev)) {
2237		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
2238				   MGMT_STATUS_NOT_POWERED, &cp->addr,
2239				   sizeof(cp->addr));
2240		goto unlock;
2241	}
2242
2243	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2244				      cp->randomizer);
2245	if (err < 0)
2246		status = MGMT_STATUS_FAILED;
 
 
 
 
 
2247	else
2248		status = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2249
2250	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2251			   &cp->addr, sizeof(cp->addr));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2252
2253unlock:
2254	hci_dev_unlock(hdev);
2255	return err;
2256}
2257
2258static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2259						void *data, u16 len)
2260{
2261	struct mgmt_cp_remove_remote_oob_data *cp = data;
2262	u8 status;
2263	int err;
2264
2265	BT_DBG("%s", hdev->name);
 
 
 
 
 
 
2266
2267	hci_dev_lock(hdev);
2268
2269	if (!hdev_is_powered(hdev)) {
2270		err = cmd_complete(sk, hdev->id,
2271				   MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2272				   MGMT_STATUS_NOT_POWERED, &cp->addr,
2273				   sizeof(cp->addr));
2274		goto unlock;
2275	}
2276
2277	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2278	if (err < 0)
2279		status = MGMT_STATUS_INVALID_PARAMS;
2280	else
2281		status = 0;
2282
2283	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2284			   status, &cp->addr, sizeof(cp->addr));
 
2285
2286unlock:
2287	hci_dev_unlock(hdev);
2288	return err;
2289}
2290
/* Kick off the BR/EDR inquiry part of an interleaved discovery session.
 * Presumably invoked once the LE scan phase has finished — confirm at
 * the caller. On failure the discovery state machine is reset to
 * stopped so userspace is not left waiting.
 */
int mgmt_interleaved_discovery(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
	if (err < 0)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);

	return err;
}
2307
/* MGMT Start Discovery: begin device discovery of the requested type
 * (BR/EDR inquiry, LE scan, or interleaved). The command is answered
 * asynchronously via the pending entry once discovery actually starts.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A periodic inquiry occupies the baseband; refuse to overlap. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery session at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (lmp_bredr_capable(hdev))
			err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
		else
			err = -ENOTSUPP;
		break;

	case DISCOV_TYPE_LE:
		if (lmp_host_le_capable(hdev))
			err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
					  LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
		else
			err = -ENOTSUPP;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved starts with the LE phase; the BR/EDR
		 * inquiry follows via mgmt_interleaved_discovery(). */
		if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
			err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
					  LE_SCAN_WIN,
					  LE_SCAN_TIMEOUT_BREDR_LE);
		else
			err = -ENOTSUPP;
		break;

	default:
		err = -EINVAL;
	}

	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2383
/* MGMT Stop Discovery: abort an active discovery session. The type in
 * the request must match the type the session was started with.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Still scanning: cancel whichever scan type is active. */
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			err = hci_cancel_inquiry(hdev);
		else
			err = hci_cancel_le_scan(hdev);

		break;

	case DISCOVERY_RESOLVING:
		/* Name-resolution phase: cancel the outstanding remote
		 * name request, or complete immediately if none is
		 * pending. */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
							NAME_PENDING);
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				   sizeof(cp), &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);
		err = -EFAULT;
	}

	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2459
2460static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2461			u16 len)
2462{
2463	struct mgmt_cp_confirm_name *cp = data;
2464	struct inquiry_entry *e;
2465	int err;
2466
2467	BT_DBG("%s", hdev->name);
2468
2469	hci_dev_lock(hdev);
2470
2471	if (!hci_discovery_active(hdev)) {
2472		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2473				 MGMT_STATUS_FAILED);
 
2474		goto failed;
2475	}
2476
2477	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2478	if (!e) {
2479		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2480				 MGMT_STATUS_INVALID_PARAMS);
 
2481		goto failed;
2482	}
2483
2484	if (cp->name_known) {
2485		e->name_state = NAME_KNOWN;
2486		list_del(&e->list);
2487	} else {
2488		e->name_state = NAME_NEEDED;
2489		hci_inquiry_cache_update_resolve(hdev, e);
2490	}
2491
2492	err = 0;
 
2493
2494failed:
2495	hci_dev_unlock(hdev);
2496	return err;
2497}
2498
2499static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2500			u16 len)
2501{
2502	struct mgmt_cp_block_device *cp = data;
2503	u8 status;
2504	int err;
2505
2506	BT_DBG("%s", hdev->name);
 
 
 
 
 
2507
2508	hci_dev_lock(hdev);
2509
2510	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2511	if (err < 0)
 
2512		status = MGMT_STATUS_FAILED;
2513	else
2514		status = 0;
 
 
 
 
2515
2516	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2517			   &cp->addr, sizeof(cp->addr));
 
2518
2519	hci_dev_unlock(hdev);
2520
2521	return err;
2522}
2523
2524static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2525			  u16 len)
2526{
2527	struct mgmt_cp_unblock_device *cp = data;
2528	u8 status;
2529	int err;
2530
2531	BT_DBG("%s", hdev->name);
 
 
 
 
 
2532
2533	hci_dev_lock(hdev);
2534
2535	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2536	if (err < 0)
 
2537		status = MGMT_STATUS_INVALID_PARAMS;
2538	else
2539		status = 0;
 
 
 
 
2540
2541	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2542			   &cp->addr, sizeof(cp->addr));
 
2543
2544	hci_dev_unlock(hdev);
2545
2546	return err;
2547}
2548
 
 
 
 
 
2549static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2550			 u16 len)
2551{
2552	struct mgmt_cp_set_device_id *cp = data;
2553	int err;
2554	__u16 source;
2555
2556	BT_DBG("%s", hdev->name);
2557
2558	source = __le16_to_cpu(cp->source);
2559
2560	if (source > 0x0002)
2561		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2562				  MGMT_STATUS_INVALID_PARAMS);
2563
2564	hci_dev_lock(hdev);
2565
2566	hdev->devid_source = source;
2567	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2568	hdev->devid_product = __le16_to_cpu(cp->product);
2569	hdev->devid_version = __le16_to_cpu(cp->version);
2570
2571	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
 
2572
2573	update_eir(hdev);
2574
2575	hci_dev_unlock(hdev);
2576
2577	return err;
2578}
2579
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* MGMT Set Fast Connectable: switch between interlaced page scanning
 * with a short interval (fast connect, higher power draw) and the
 * standard 1.28 s page scan.
 *
 * NOTE(review): two HCI commands are sent back to back; if the second
 * (page scan type) fails to queue, the interval from the first command
 * may already be in effect, leaving the controller in a mixed state.
 * Success is reported when both commands were queued, not when the
 * controller acknowledged them.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable only makes sense while page scan is enabled. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (cp->val) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 22.5 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0024);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	/* default 11.25 msec page scan window */
	acp.window = __constant_cpu_to_le16(0x0012);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
			   &acp);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		goto done;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		goto done;
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
			   NULL, 0);
done:
	hci_dev_unlock(hdev);
	return err;
}
2636
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2637static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2638			       void *cp_data, u16 len)
2639{
2640	struct mgmt_cp_load_long_term_keys *cp = cp_data;
 
 
2641	u16 key_count, expected_len;
2642	int i;
 
 
 
 
 
 
2643
2644	key_count = __le16_to_cpu(cp->key_count);
 
 
 
 
 
 
2645
2646	expected_len = sizeof(*cp) + key_count *
2647					sizeof(struct mgmt_ltk_info);
2648	if (expected_len != len) {
2649		BT_ERR("load_keys: expected %u bytes, got %u bytes",
2650							len, expected_len);
2651		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2652				  EINVAL);
2653	}
2654
2655	BT_DBG("%s key_count %u", hdev->name, key_count);
 
 
 
 
 
 
 
 
 
2656
2657	hci_dev_lock(hdev);
2658
2659	hci_smp_ltks_clear(hdev);
2660
2661	for (i = 0; i < key_count; i++) {
2662		struct mgmt_ltk_info *key = &cp->keys[i];
2663		u8 type;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2664
2665		if (key->master)
2666			type = HCI_SMP_LTK;
2667		else
2668			type = HCI_SMP_LTK_SLAVE;
2669
2670		hci_add_ltk(hdev, &key->addr.bdaddr,
2671			    bdaddr_to_le(key->addr.type),
2672			    type, 0, key->authenticated, key->val,
2673			    key->enc_size, key->ediv, key->rand);
2674	}
2675
 
 
 
2676	hci_dev_unlock(hdev);
2677
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2678	return 0;
2679}
2680
/* Dispatch table for incoming mgmt commands, indexed by opcode.
 *
 * var_len == false: the payload must be exactly data_len bytes.
 * var_len == true:  data_len is a minimum; the handler validates the
 *                   exact length itself (e.g. key-list commands).
 * Both rules are enforced centrally in mgmt_control().
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2729
 
 
 
 
2730
/* Entry point for mgmt messages written to a management HCI socket.
 * Copies the untrusted message into kernel memory, validates header,
 * opcode, index and payload length, then dispatches to the handler
 * table. Returns the number of bytes consumed on success.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
					mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry
	 * a controller index; all others require one. */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
			(!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len handlers take a minimum size, fixed handlers an exact
	 * size (see the table definition). */
	if ((handler->var_len && len < handler->data_len) ||
			(!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
2816
2817static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2818{
2819	u8 *status = data;
 
2820
2821	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
2822	mgmt_pending_remove(cmd);
 
2823}
2824
/* Broadcast an Index Added event to all mgmt sockets when a new
 * controller is registered. */
int mgmt_index_added(struct hci_dev *hdev)
{
	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}
2829
/* A controller is going away: fail every command still pending on it,
 * then broadcast Index Removed. */
int mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Opcode 0 appears to act as a wildcard matching all pending
	 * commands (same usage in mgmt_powered()) — confirm against
	 * mgmt_pending_foreach(). */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
2838
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp(). */
struct cmd_lookup {
	struct sock *sk;	/* first answered socket; a reference is
				 * taken via sock_hold() in settings_rsp() */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* not referenced in this chunk — presumably
				 * used by other callbacks; confirm */
};
2844
2845static void settings_rsp(struct pending_cmd *cmd, void *data)
 
2846{
2847	struct cmd_lookup *match = data;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2848
2849	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
2850
2851	list_del(&cmd->list);
 
2852
2853	if (match->sk == NULL) {
2854		match->sk = cmd->sk;
2855		sock_hold(match->sk);
2856	}
2857
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2858	mgmt_pending_free(cmd);
2859}
2860
/* Called when the adapter's power state changes. Answers any pending
 * Set Powered commands, and on power-on replays the host configuration
 * (scan enable, SSP, LE host support, class, name, EIR) to the freshly
 * initialized controller. On power-off all remaining pending commands
 * are failed with NOT_POWERED.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	int err;

	/* Nothing to do for controllers not managed via mgmt. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	if (powered) {
		u8 scan = 0;

		if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			scan |= SCAN_PAGE;
		if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			scan |= SCAN_INQUIRY;

		if (scan)
			hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 ssp = 1;

			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
		}

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_write_le_host_supported cp;

			cp.le = 1;
			/* Advertise simultaneous LE + BR/EDR only if the
			 * controller features say it is supported. */
			cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);

			hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp);
		}

		update_class(hdev);
		update_name(hdev, hdev->dev_name);
		update_eir(hdev);
	} else {
		/* Fail everything else still pending (opcode 0 matches
		 * all — see mgmt_index_removed()). */
		u8 status = MGMT_STATUS_NOT_POWERED;
		mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
	}

	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
2913
2914int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2915{
2916	struct cmd_lookup match = { NULL, hdev };
2917	bool changed = false;
2918	int err = 0;
2919
2920	if (discoverable) {
2921		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2922			changed = true;
2923	} else {
2924		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2925			changed = true;
 
2926	}
2927
2928	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
2929			     &match);
2930
2931	if (changed)
2932		err = new_settings(hdev, match.sk);
2933
2934	if (match.sk)
2935		sock_put(match.sk);
2936
2937	return err;
2938}
2939
2940int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
2941{
2942	struct cmd_lookup match = { NULL, hdev };
2943	bool changed = false;
2944	int err = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2945
2946	if (connectable) {
2947		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2948			changed = true;
2949	} else {
2950		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2951			changed = true;
2952	}
2953
2954	mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
2955			     &match);
2956
2957	if (changed)
2958		err = new_settings(hdev, match.sk);
2959
2960	if (match.sk)
2961		sock_put(match.sk);
2962
2963	return err;
2964}
2965
2966int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2967{
2968	u8 mgmt_err = mgmt_status(status);
 
 
 
 
 
2969
2970	if (scan & SCAN_PAGE)
2971		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
2972				     cmd_status_rsp, &mgmt_err);
 
2973
2974	if (scan & SCAN_INQUIRY)
2975		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
2976				     cmd_status_rsp, &mgmt_err);
2977
2978	return 0;
2979}
2980
2981int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2982		      bool persistent)
2983{
2984	struct mgmt_ev_new_link_key ev;
2985
2986	memset(&ev, 0, sizeof(ev));
2987
2988	ev.store_hint = persistent;
2989	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2990	ev.key.addr.type = BDADDR_BREDR;
2991	ev.key.type = key->type;
2992	memcpy(ev.key.val, key->val, 16);
2993	ev.key.pin_len = key->pin_len;
2994
2995	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2996}
2997
2998int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
2999{
3000	struct mgmt_ev_new_long_term_key ev;
3001
3002	memset(&ev, 0, sizeof(ev));
3003
3004	ev.store_hint = persistent;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3005	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3006	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3007	ev.key.authenticated = key->authenticated;
3008	ev.key.enc_size = key->enc_size;
3009	ev.key.ediv = key->ediv;
 
 
 
 
 
 
 
 
 
 
 
3010
3011	if (key->type == HCI_SMP_LTK)
3012		ev.key.master = 1;
 
 
 
 
 
 
 
 
3013
3014	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3015	memcpy(ev.key.val, key->val, sizeof(key->val));
 
 
3016
3017	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3018			  NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3019}
3020
3021int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3022			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
3023			  u8 *dev_class)
3024{
3025	char buf[512];
3026	struct mgmt_ev_device_connected *ev = (void *) buf;
3027	u16 eir_len = 0;
 
3028
3029	bacpy(&ev->addr.bdaddr, bdaddr);
3030	ev->addr.type = link_to_bdaddr(link_type, addr_type);
 
 
 
 
 
 
 
 
 
 
 
 
 
3031
3032	ev->flags = __cpu_to_le32(flags);
3033
3034	if (name_len > 0)
3035		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3036					  name, name_len);
3037
3038	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3039		eir_len = eir_append_data(ev->eir, eir_len,
3040					  EIR_CLASS_OF_DEV, dev_class, 3);
 
 
 
 
 
 
 
 
3041
3042	ev->eir_len = cpu_to_le16(eir_len);
3043
3044	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3045			  sizeof(*ev) + eir_len, NULL);
3046}
3047
3048static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3049{
3050	struct mgmt_cp_disconnect *cp = cmd->param;
3051	struct sock **sk = data;
3052	struct mgmt_rp_disconnect rp;
3053
3054	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3055	rp.addr.type = cp->addr.type;
3056
3057	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3058		     sizeof(rp));
3059
3060	*sk = cmd->sk;
3061	sock_hold(*sk);
3062
3063	mgmt_pending_remove(cmd);
3064}
3065
3066static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3067{
3068	struct hci_dev *hdev = data;
3069	struct mgmt_cp_unpair_device *cp = cmd->param;
3070	struct mgmt_rp_unpair_device rp;
3071
3072	memset(&rp, 0, sizeof(rp));
3073	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3074	rp.addr.type = cp->addr.type;
 
 
 
 
 
 
 
3075
3076	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
 
 
3077
3078	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
 
 
3079
3080	mgmt_pending_remove(cmd);
3081}
3082
3083int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3084			     u8 link_type, u8 addr_type)
 
3085{
3086	struct mgmt_addr_info ev;
3087	struct sock *sk = NULL;
3088	int err;
 
 
 
 
 
 
 
 
 
 
 
 
 
3089
3090	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3091
3092	bacpy(&ev.bdaddr, bdaddr);
3093	ev.type = link_to_bdaddr(link_type, addr_type);
 
 
 
 
 
3094
3095	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3096			 sk);
3097
3098	if (sk)
3099		sock_put(sk);
3100
3101	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3102			     hdev);
3103
3104	return err;
3105}
3106
3107int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3108			   u8 link_type, u8 addr_type, u8 status)
3109{
3110	struct mgmt_rp_disconnect rp;
3111	struct pending_cmd *cmd;
3112	int err;
 
 
 
3113
3114	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3115	if (!cmd)
3116		return -ENOENT;
3117
3118	bacpy(&rp.addr.bdaddr, bdaddr);
3119	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3120
3121	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3122			   mgmt_status(status), &rp, sizeof(rp));
3123
 
 
 
 
3124	mgmt_pending_remove(cmd);
3125
3126	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3127									hdev);
3128	return err;
3129}
3130
3131int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3132			u8 addr_type, u8 status)
3133{
3134	struct mgmt_ev_connect_failed ev;
3135
 
 
 
 
 
 
 
 
3136	bacpy(&ev.addr.bdaddr, bdaddr);
3137	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3138	ev.status = mgmt_status(status);
3139
3140	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3141}
3142
3143int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3144{
3145	struct mgmt_ev_pin_code_request ev;
3146
3147	bacpy(&ev.addr.bdaddr, bdaddr);
3148	ev.addr.type = BDADDR_BREDR;
3149	ev.secure = secure;
3150
3151	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3152			  NULL);
3153}
3154
3155int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3156				 u8 status)
3157{
3158	struct pending_cmd *cmd;
3159	struct mgmt_rp_pin_code_reply rp;
3160	int err;
3161
3162	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3163	if (!cmd)
3164		return -ENOENT;
3165
3166	bacpy(&rp.addr.bdaddr, bdaddr);
3167	rp.addr.type = BDADDR_BREDR;
3168
3169	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3170			   mgmt_status(status), &rp, sizeof(rp));
3171
 
3172	mgmt_pending_remove(cmd);
3173
3174	return err;
3175}
3176
3177int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3178				     u8 status)
3179{
3180	struct pending_cmd *cmd;
3181	struct mgmt_rp_pin_code_reply rp;
3182	int err;
3183
3184	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3185	if (!cmd)
3186		return -ENOENT;
3187
3188	bacpy(&rp.addr.bdaddr, bdaddr);
3189	rp.addr.type = BDADDR_BREDR;
3190
3191	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3192			   mgmt_status(status), &rp, sizeof(rp));
3193
 
3194	mgmt_pending_remove(cmd);
3195
3196	return err;
3197}
3198
3199int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3200			      u8 link_type, u8 addr_type, __le32 value,
3201			      u8 confirm_hint)
3202{
3203	struct mgmt_ev_user_confirm_request ev;
3204
3205	BT_DBG("%s", hdev->name);
3206
3207	bacpy(&ev.addr.bdaddr, bdaddr);
3208	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3209	ev.confirm_hint = confirm_hint;
3210	ev.value = value;
3211
3212	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3213			  NULL);
3214}
3215
3216int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217						u8 link_type, u8 addr_type)
3218{
3219	struct mgmt_ev_user_passkey_request ev;
3220
3221	BT_DBG("%s", hdev->name);
3222
3223	bacpy(&ev.addr.bdaddr, bdaddr);
3224	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3225
3226	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3227			  NULL);
3228}
3229
3230static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3231					u8 link_type, u8 addr_type, u8 status,
3232					u8 opcode)
3233{
3234	struct pending_cmd *cmd;
3235	struct mgmt_rp_user_confirm_reply rp;
3236	int err;
3237
3238	cmd = mgmt_pending_find(opcode, hdev);
3239	if (!cmd)
3240		return -ENOENT;
3241
3242	bacpy(&rp.addr.bdaddr, bdaddr);
3243	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3244	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3245			   &rp, sizeof(rp));
3246
3247	mgmt_pending_remove(cmd);
3248
3249	return err;
3250}
3251
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
3258
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
3265
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
3272
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
3279
3280int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3281		     u8 addr_type, u8 status)
 
3282{
3283	struct mgmt_ev_auth_failed ev;
 
 
3284
3285	bacpy(&ev.addr.bdaddr, bdaddr);
3286	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3287	ev.status = mgmt_status(status);
 
3288
3289	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3290}
3291
3292int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3293{
3294	struct cmd_lookup match = { NULL, hdev };
3295	bool changed = false;
3296	int err = 0;
3297
3298	if (status) {
3299		u8 mgmt_err = mgmt_status(status);
3300		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3301				     cmd_status_rsp, &mgmt_err);
3302		return 0;
3303	}
3304
3305	if (test_bit(HCI_AUTH, &hdev->flags)) {
3306		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3307			changed = true;
3308	} else {
3309		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3310			changed = true;
3311	}
3312
3313	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3314			     &match);
3315
3316	if (changed)
3317		err = new_settings(hdev, match.sk);
3318
3319	if (match.sk)
3320		sock_put(match.sk);
3321
3322	return err;
3323}
3324
3325static int clear_eir(struct hci_dev *hdev)
3326{
3327	struct hci_cp_write_eir cp;
3328
3329	if (!(hdev->features[6] & LMP_EXT_INQ))
3330		return 0;
3331
3332	memset(hdev->eir, 0, sizeof(hdev->eir));
3333
3334	memset(&cp, 0, sizeof(cp));
3335
3336	return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3337}
3338
/* A Write SSP Mode command completed.
 *
 * On failure: if we were trying to enable SSP, roll back the
 * HCI_SSP_ENABLED flag (emitting New Settings for the rollback), then
 * fail all pending Set SSP commands.  On success: sync the flag with
 * the requested state, answer pending commands, emit New Settings when
 * the setting changed, and update or clear the EIR data to match.
 */
int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;
	int err = 0;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* The flag was set optimistically; undo it and tell
		 * everyone (NULL: no socket to skip).
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags))
			err = new_settings(hdev, NULL);

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);

		return err;
	}

	if (enable) {
		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
			changed = true;
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR is only meaningful while SSP is enabled */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(hdev);
	else
		clear_eir(hdev);

	return err;
}
3381
3382static void class_rsp(struct pending_cmd *cmd, void *data)
3383{
3384	struct cmd_lookup *match = data;
3385
3386	cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3387		     match->hdev->dev_class, 3);
3388
3389	list_del(&cmd->list);
3390
3391	if (match->sk == NULL) {
3392		match->sk = cmd->sk;
3393		sock_hold(match->sk);
3394	}
3395
3396	mgmt_pending_free(cmd);
3397}
3398
3399int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3400				   u8 status)
3401{
3402	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3403	int err = 0;
3404
3405	clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3406
3407	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
3408	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3409	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3410
3411	if (!status)
3412		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3413				 3, NULL);
3414
3415	if (match.sk)
3416		sock_put(match.sk);
3417
3418	return err;
3419}
3420
/* A Write Local Name command completed (or the name changed
 * spontaneously, in which case no mgmt command is pending).
 *
 * Syncs hdev->dev_name with @name, answers a pending Set Local Name
 * command (success or failure), emits Local Name Changed when the name
 * actually changed, and refreshes the EIR data.
 */
int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_cp_set_local_name ev;
	bool changed = false;
	int err = 0;

	/* assumes @name points at a buffer of at least
	 * sizeof(hdev->dev_name) bytes — TODO confirm against callers
	 */
	if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
		changed = true;
	}

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto send_event;

	/* Always assume that either the short or the complete name has
	 * changed if there was a pending mgmt command */
	changed = true;

	if (status) {
		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				 mgmt_status(status));
		goto failed;
	}

	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
			   sizeof(ev));
	if (err < 0)
		goto failed;

send_event:
	/* Skip the initiating socket (if any) in the broadcast */
	if (changed)
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
				 sizeof(ev), cmd ? cmd->sk : NULL);

	update_eir(hdev);

failed:
	if (cmd)
		mgmt_pending_remove(cmd);
	return err;
}
3468
3469int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3470					    u8 *randomizer, u8 status)
3471{
3472	struct pending_cmd *cmd;
3473	int err;
 
 
 
 
 
 
 
3474
3475	BT_DBG("%s status %u", hdev->name, status);
 
3476
3477	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3478	if (!cmd)
3479		return -ENOENT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3480
3481	if (status) {
3482		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3483				 mgmt_status(status));
3484	} else {
3485		struct mgmt_rp_read_local_oob_data rp;
3486
3487		memcpy(rp.hash, hash, sizeof(rp.hash));
3488		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3489
3490		err = cmd_complete(cmd->sk, hdev->id,
3491				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3492				   sizeof(rp));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3493	}
3494
3495	mgmt_pending_remove(cmd);
 
 
 
 
 
 
 
 
3496
3497	return err;
3498}
3499
/* A Write LE Host Supported command completed.
 *
 * On failure: if we were trying to enable LE, roll back the
 * HCI_LE_ENABLED flag (emitting New Settings for the rollback), then
 * fail all pending Set LE commands.  On success: sync the flag with
 * the requested state, answer pending commands and emit New Settings
 * when the setting changed.
 */
int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;
	int err = 0;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* The flag was set optimistically; undo it and tell
		 * everyone (NULL: no socket to skip).
		 */
		if (enable && test_and_clear_bit(HCI_LE_ENABLED,
						 &hdev->dev_flags))
			err = new_settings(hdev, NULL);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);

		return err;
	}

	if (enable) {
		if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			changed = true;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	if (changed)
		err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
3537
/* Emit a Device Found event for a discovery result.
 *
 * @cfm_name: set the Confirm Name flag (userspace should resolve the
 *            remote name).
 * @ssp:      remote supports SSP; when clear, the Legacy Pairing flag
 *            is set instead.
 * @eir/@eir_len: raw EIR data from the controller, copied verbatim; a
 *            class-of-device field is appended when @dev_class is
 *            given and the EIR doesn't already carry one.
 *
 * Returns -EINVAL when the EIR data cannot fit in the event buffer.
 */
int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		      ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
	if (!ssp)
		ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append CoD only if the controller-supplied EIR lacks one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	ev_size = sizeof(*ev) + eir_len;

	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
3573
/* Report a resolved remote name as a Device Found event whose EIR data
 * consists of a single Complete Name field.
 */
int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* 2 extra bytes: EIR field length + type header */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* assumes name_len <= HCI_MAX_NAME_LENGTH so the append fits in
	 * buf — TODO confirm callers enforce this bound
	 */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
			  sizeof(*ev) + eir_len, NULL);
}
3597
3598int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
 
3599{
3600	struct pending_cmd *cmd;
3601	u8 type;
3602	int err;
 
3603
3604	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
3605
3606	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3607	if (!cmd)
3608		return -ENOENT;
 
3609
3610	type = hdev->discovery.type;
 
 
 
3611
3612	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3613			   &type, sizeof(type));
3614	mgmt_pending_remove(cmd);
3615
3616	return err;
3617}
3618
3619int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3620{
3621	struct pending_cmd *cmd;
3622	int err;
3623
3624	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3625	if (!cmd)
3626		return -ENOENT;
3627
3628	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3629			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3630	mgmt_pending_remove(cmd);
3631
3632	return err;
3633}
3634
3635int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
3636{
3637	struct mgmt_ev_discovering ev;
3638	struct pending_cmd *cmd;
3639
3640	BT_DBG("%s discovering %u", hdev->name, discovering);
 
 
3641
3642	if (discovering)
3643		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3644	else
3645		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3646
3647	if (cmd != NULL) {
3648		u8 type = hdev->discovery.type;
 
 
 
 
 
3649
3650		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
3651			     sizeof(type));
3652		mgmt_pending_remove(cmd);
3653	}
3654
3655	memset(&ev, 0, sizeof(ev));
3656	ev.type = hdev->discovery.type;
3657	ev.discovering = discovering;
 
 
 
3658
3659	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
 
 
3660}
3661
3662int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3663{
3664	struct pending_cmd *cmd;
3665	struct mgmt_ev_device_blocked ev;
3666
3667	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
3668
3669	bacpy(&ev.addr.bdaddr, bdaddr);
3670	ev.addr.type = type;
3671
3672	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
3673			  cmd ? cmd->sk : NULL);
3674}
3675
3676int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3677{
3678	struct pending_cmd *cmd;
3679	struct mgmt_ev_device_unblocked ev;
3680
3681	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
3682
3683	bacpy(&ev.addr.bdaddr, bdaddr);
3684	ev.addr.type = type;
 
 
 
 
 
 
3685
3686	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
3687			  cmd ? cmd->sk : NULL);
3688}
3689
/* Module parameter: toggles the High Speed support feature (0644:
 * readable by all, writable by root via sysfs).
 */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");