/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

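/* Map common errno values returned by the HCI layer onto MGMT status
 * codes. Anything not explicitly handled falls back to
 * MGMT_STATUS_FAILED.
 */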
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

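/* Convert either a positive HCI status (an index into
 * mgmt_status_table) or a negative errno into an MGMT status code.
 */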
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

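/* Translate an MGMT address type into the LE address type (public vs.
 * random) used by the HCI core.
 */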
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

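/* Trusted sockets see the full command and event lists; untrusted
 * sockets are restricted to the read-only subsets defined above.
 */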
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

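/* Return the list of configured primary controller indexes. Controllers
 * in setup, config or user-channel state, raw-only devices and
 * unconfigured devices are skipped.
 */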
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

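/* A controller counts as configured once any required external
 * configuration has completed and a valid public address is set.
 */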
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

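/* Build the bitmask of PHYs the controller can support, derived from
 * the LMP features for BR/EDR and the LE features for 1M/2M/Coded.
 */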
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

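/* Settings the controller could support if enabled, as opposed to the
 * currently active settings reported by get_current_settings().
 */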
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

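/* Snapshot of the settings currently in effect, derived from the hdev
 * flags and address state.
 */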
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single-mode LE-only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_enable_advertising_sync() and
	 * hci_start_ext_adv_sync() functions called below.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

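/* Complete a pending mesh transmission, optionally notifying user space
 * with a Mesh Packet Complete event before removing the entry.
 */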
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

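/* One-time switch of a controller into mgmt mode. This is where the
 * delayed works for discoverable timeout, service cache, RPA expiry and
 * mesh transmission get initialized.
 */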
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them. For mgmt,
	 * however, user space must explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd is still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to the hdev->power_off work which does call
		 * hci_dev_do_close().
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

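/* Command handler for MGMT_OP_SET_POWERED. The actual power change runs
 * asynchronously via hci_cmd_sync and completes in
 * mgmt_set_powered_complete().
 */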
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd is still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

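/* Command handler for MGMT_OP_SET_DISCOVERABLE. val may be 0x00 (off),
 * 0x01 (general) or 0x02 (limited); limited discoverable additionally
 * requires a timeout, while disabling requires that none is given.
 */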
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout,
		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. If only the timeout gets updated,
	 * there is no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might still
	 * be active and store the new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd is still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

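/* Command handler for MGMT_OP_SET_CONNECTABLE. When powered off only
 * the stored setting is updated; otherwise the change is queued as a
 * sync command and completes in mgmt_set_connectable_complete().
 */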
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd is still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

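/* Command handler for MGMT_OP_SET_SSP. Disabling SSP also clears the
 * High Speed setting, since HS depends on SSP being enabled.
 */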
1983static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1984{
1985 struct mgmt_mode *cp = data;
1986 struct mgmt_pending_cmd *cmd;
1987 u8 status;
1988 int err;
1989
1990 bt_dev_dbg(hdev, "sock %p", sk);
1991
1992 status = mgmt_bredr_support(hdev);
1993 if (status)
1994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1995
1996 if (!lmp_ssp_capable(hdev))
1997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1998 MGMT_STATUS_NOT_SUPPORTED);
1999
2000 if (cp->val != 0x00 && cp->val != 0x01)
2001 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2002 MGMT_STATUS_INVALID_PARAMS);
2003
2004 hci_dev_lock(hdev);
2005
2006 if (!hdev_is_powered(hdev)) {
2007 bool changed;
2008
2009 if (cp->val) {
2010 changed = !hci_dev_test_and_set_flag(hdev,
2011 HCI_SSP_ENABLED);
2012 } else {
2013 changed = hci_dev_test_and_clear_flag(hdev,
2014 HCI_SSP_ENABLED);
2015 if (!changed)
2016 changed = hci_dev_test_and_clear_flag(hdev,
2017 HCI_HS_ENABLED);
2018 else
2019 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2020 }
2021
2022 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2023 if (err < 0)
2024 goto failed;
2025
2026 if (changed)
2027 err = new_settings(hdev, sk);
2028
2029 goto failed;
2030 }
2031
2032 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2033 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2034 MGMT_STATUS_BUSY);
2035 goto failed;
2036 }
2037
2038 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2039 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2040 goto failed;
2041 }
2042
2043 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2044 if (!cmd)
2045 err = -ENOMEM;
2046 else
2047 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2048 set_ssp_complete);
2049
2050 if (err < 0) {
2051 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2052 MGMT_STATUS_FAILED);
2053
2054 if (cmd)
2055 mgmt_pending_remove(cmd);
2056 }
2057
2058failed:
2059 hci_dev_unlock(hdev);
2060 return err;
2061}
2062
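/* Set High Speed (AMP) support. High Speed requires Secure Simple Pairing,
 * so the request is rejected unless SSP is enabled. Turning High Speed off
 * is only permitted while the controller is powered off.
 */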
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

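/* Enable or disable LE support on the controller. Disabling first tears
 * down any advertising instances; after a successful enable the default
 * advertising and scan response data are refreshed and passive scanning
 * is re-evaluated.
 */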
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}

static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}

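/* Apply the Mesh Receiver settings: toggle the HCI_MESH flag and install
 * the requested AD type filter list. An oversized filter list is not
 * copied, leaving mesh_ad_types zeroed so all advertising packets are
 * forwarded.
 */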
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}

static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}

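/* Transmit one mesh packet by installing it as a short-lived advertising
 * instance numbered one past le_num_of_adv_sets, so it cannot clash with
 * a regular instance, and scheduling it unless advertising is already in
 * progress, in which case it is picked up from the queue naturally.
 */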
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}

static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}

static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

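	/* Shorten the reply so it carries only the handles actually in use */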
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}

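/* Cancel one pending mesh transmission (looked up by handle) or, with
 * handle 0x00, every transmission owned by the requesting socket.
 */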
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}

static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single-mode LE-only controllers, or dual-mode
	 * controllers configured as LE-only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result in rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

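/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in the
 * little-endian byte order used on the wire.
 */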
static const u8 bluetooth_base_uuid[] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

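/* Arm the service cache timer on a powered controller, unless it is
 * already running. Returns true if the timer was armed, in which case the
 * caller defers the class/EIR update until the timer fires.
 */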
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}

static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a re-pairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

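/* Map an HCI link type plus address type to the single address type value
 * used by the management interface.
 */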
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

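	/* Fill in the addresses; SCO and eSCO links are filtered out by not
	 * advancing the index, so their slot is overwritten by the next
	 * connection and the final count excludes them.
	 */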
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

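/* Common helper for the user confirmation and passkey (negative) replies:
 * responses for LE pairings are handed to SMP directly, while BR/EDR
 * responses are forwarded to the controller as the given HCI command.
 */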
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

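/* Expire the current advertising instance if it carries data affected by
 * @flags (e.g. local name or appearance) and schedule the next instance,
 * so stale data stops being advertised.
 */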
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}

static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}

static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

4114 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4115 pkt_type |= (HCI_DH3 | HCI_DM3);
4116 else
4117 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4118
4119 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4120 pkt_type |= (HCI_DH5 | HCI_DM5);
4121 else
4122 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4123
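/* EDR packet-type bits use inverted logic: a set bit marks the packet
 * type as "shall not be used", so clear it to enable the selected PHY.
 */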
4124 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4125 pkt_type &= ~HCI_2DH1;
4126 else
4127 pkt_type |= HCI_2DH1;
4128
4129 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4130 pkt_type &= ~HCI_2DH3;
4131 else
4132 pkt_type |= HCI_2DH3;
4133
4134 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4135 pkt_type &= ~HCI_2DH5;
4136 else
4137 pkt_type |= HCI_2DH5;
4138
4139 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4140 pkt_type &= ~HCI_3DH1;
4141 else
4142 pkt_type |= HCI_3DH1;
4143
4144 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4145 pkt_type &= ~HCI_3DH3;
4146 else
4147 pkt_type |= HCI_3DH3;
4148
4149 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4150 pkt_type &= ~HCI_3DH5;
4151 else
4152 pkt_type |= HCI_3DH5;
4153
4154 if (pkt_type != hdev->pkt_type) {
4155 hdev->pkt_type = pkt_type;
4156 changed = true;
4157 }
4158
4159 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4160 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4161 if (changed)
4162 mgmt_phy_configuration_changed(hdev, sk);
4163
4164 err = mgmt_cmd_complete(sk, hdev->id,
4165 MGMT_OP_SET_PHY_CONFIGURATION,
4166 0, NULL, 0);
4167
4168 goto unlock;
4169 }
4170
4171 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4172 len);
4173 if (!cmd)
4174 err = -ENOMEM;
4175 else
4176 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4177 set_default_phy_complete);
4178
4179 if (err < 0) {
4180 err = mgmt_cmd_status(sk, hdev->id,
4181 MGMT_OP_SET_PHY_CONFIGURATION,
4182 MGMT_STATUS_FAILED);
4183
4184 if (cmd)
4185 mgmt_pending_remove(cmd);
4186 }
4187
4188unlock:
4189 hci_dev_unlock(hdev);
4190
4191 return err;
4192}
4193
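/* Replace the list of blocked keys with the list provided by
 * userspace. The new entries take effect for any keys received after
 * this call.
 */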
4194static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4195 u16 len)
4196{
4197 int err = MGMT_STATUS_SUCCESS;
4198 struct mgmt_cp_set_blocked_keys *keys = data;
4199 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4200 sizeof(struct mgmt_blocked_key_info));
4201 u16 key_count, expected_len;
4202 int i;
4203
4204 bt_dev_dbg(hdev, "sock %p", sk);
4205
4206 key_count = __le16_to_cpu(keys->key_count);
4207 if (key_count > max_key_count) {
4208 bt_dev_err(hdev, "too big key_count value %u", key_count);
4209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4210 MGMT_STATUS_INVALID_PARAMS);
4211 }
4212
4213 expected_len = struct_size(keys, keys, key_count);
4214 if (expected_len != len) {
4215 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4216 expected_len, len);
4217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4218 MGMT_STATUS_INVALID_PARAMS);
4219 }
4220
4221 hci_dev_lock(hdev);
4222
4223 hci_blocked_keys_clear(hdev);
4224
4225 for (i = 0; i < key_count; ++i) {
4226 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4227
4228 if (!b) {
4229 err = MGMT_STATUS_NO_RESOURCES;
4230 break;
4231 }
4232
4233 b->type = keys->keys[i].type;
4234 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4235 list_add_rcu(&b->list, &hdev->blocked_keys);
4236 }
4237 hci_dev_unlock(hdev);
4238
4239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4240 err, NULL, 0);
4241}
4242
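/* Toggle the HCI_WIDEBAND_SPEECH_ENABLED setting. The value can only
 * be changed while the controller is powered off and requires the
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk.
 */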
4243static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4244 void *data, u16 len)
4245{
4246 struct mgmt_mode *cp = data;
4247 int err;
4248 bool changed = false;
4249
4250 bt_dev_dbg(hdev, "sock %p", sk);
4251
4252 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4253 return mgmt_cmd_status(sk, hdev->id,
4254 MGMT_OP_SET_WIDEBAND_SPEECH,
4255 MGMT_STATUS_NOT_SUPPORTED);
4256
4257 if (cp->val != 0x00 && cp->val != 0x01)
4258 return mgmt_cmd_status(sk, hdev->id,
4259 MGMT_OP_SET_WIDEBAND_SPEECH,
4260 MGMT_STATUS_INVALID_PARAMS);
4261
4262 hci_dev_lock(hdev);
4263
4264 if (hdev_is_powered(hdev) &&
4265 !!cp->val != hci_dev_test_flag(hdev,
4266 HCI_WIDEBAND_SPEECH_ENABLED)) {
4267 err = mgmt_cmd_status(sk, hdev->id,
4268 MGMT_OP_SET_WIDEBAND_SPEECH,
4269 MGMT_STATUS_REJECTED);
4270 goto unlock;
4271 }
4272
4273 if (cp->val)
4274 changed = !hci_dev_test_and_set_flag(hdev,
4275 HCI_WIDEBAND_SPEECH_ENABLED);
4276 else
4277 changed = hci_dev_test_and_clear_flag(hdev,
4278 HCI_WIDEBAND_SPEECH_ENABLED);
4279
4280 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4281 if (err < 0)
4282 goto unlock;
4283
4284 if (changed)
4285 err = new_settings(hdev, sk);
4286
4287unlock:
4288 hci_dev_unlock(hdev);
4289 return err;
4290}
4291
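/* Build the MGMT_OP_READ_CONTROLLER_CAP response: security flags,
 * maximum encryption key sizes and, when available, the LE TX power
 * range, all encoded as EIR-style tagged fields.
 */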
4292static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4293 void *data, u16 data_len)
4294{
4295 char buf[20];
4296 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4297 u16 cap_len = 0;
4298 u8 flags = 0;
4299 u8 tx_power_range[2];
4300
4301 bt_dev_dbg(hdev, "sock %p", sk);
4302
4303 memset(buf, 0, sizeof(buf));
4304
4305 hci_dev_lock(hdev);
4306
4307 /* When the Read Simple Pairing Options command is supported, the
4308 * controller performs remote public key validation.
4309 *
4310 * Alternatively, when Microsoft extensions are available, they can
4311 * indicate support for public key validation as well.
4312 */
4313 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4314 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4315
4316 flags |= 0x02; /* Remote public key validation (LE) */
4317
4318 /* When the Read Encryption Key Size command is supported, then the
4319 * encryption key size is enforced.
4320 */
4321 if (hdev->commands[20] & 0x10)
4322 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4323
4324 flags |= 0x08; /* Encryption key size enforcement (LE) */
4325
4326 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4327 &flags, 1);
4328
4329 /* When the Read Simple Pairing Options command is supported, the
4330 * maximum encryption key size is also provided.
4331 */
4332 if (hdev->commands[41] & 0x08)
4333 cap_len = eir_append_le16(rp->cap, cap_len,
4334 MGMT_CAP_MAX_ENC_KEY_SIZE,
4335 hdev->max_enc_key_size);
4336
4337 cap_len = eir_append_le16(rp->cap, cap_len,
4338 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4339 SMP_MAX_ENC_KEY_SIZE);
4340
4341 /* Append the min/max LE tx power parameters if we were able to
4342 * fetch them from the controller.
4343 */
4344 if (hdev->commands[38] & 0x80) {
4345 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4346 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4347 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4348 tx_power_range, 2);
4349 }
4350
4351 rp->cap_len = cpu_to_le16(cap_len);
4352
4353 hci_dev_unlock(hdev);
4354
4355 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4356 rp, sizeof(*rp) + cap_len);
4357}
4358
4359#ifdef CONFIG_BT_FEATURE_DEBUG
4360/* d4992530-b9ec-469f-ab01-6c481c47da1c */
4361static const u8 debug_uuid[16] = {
4362 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4363 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4364};
4365#endif
4366
4367/* 330859bc-7506-492d-9370-9a6f0614037f */
4368static const u8 quality_report_uuid[16] = {
4369 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4370 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4371};
4372
4373/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4374static const u8 offload_codecs_uuid[16] = {
4375 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4376 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4377};
4378
4379/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4380static const u8 le_simultaneous_roles_uuid[16] = {
4381 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4382 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4383};
4384
4385/* 15c0a148-c273-11ea-b3de-0242ac130004 */
4386static const u8 rpa_resolution_uuid[16] = {
4387 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4388 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4389};
4390
4391/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4392static const u8 iso_socket_uuid[16] = {
4393 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4394 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4395};
4396
4397/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4398static const u8 mgmt_mesh_uuid[16] = {
4399 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4400 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4401};
4402
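/* List the experimental features applicable to the given index (or to
 * the non-controller index when hdev is NULL) along with their current
 * flags.
 */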
4403static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4404 void *data, u16 data_len)
4405{
4406 struct mgmt_rp_read_exp_features_info *rp;
4407 size_t len;
4408 u16 idx = 0;
4409 u32 flags;
4410 int status;
4411
4412 bt_dev_dbg(hdev, "sock %p", sk);
4413
4414 /* Enough space for 7 features */
4415 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4416 rp = kzalloc(len, GFP_KERNEL);
4417 if (!rp)
4418 return -ENOMEM;
4419
4420#ifdef CONFIG_BT_FEATURE_DEBUG
4421 if (!hdev) {
4422 flags = bt_dbg_get() ? BIT(0) : 0;
4423
4424 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4425 rp->features[idx].flags = cpu_to_le32(flags);
4426 idx++;
4427 }
4428#endif
4429
4430 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4431 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4432 flags = BIT(0);
4433 else
4434 flags = 0;
4435
4436 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4437 rp->features[idx].flags = cpu_to_le32(flags);
4438 idx++;
4439 }
4440
4441 if (hdev && ll_privacy_capable(hdev)) {
4442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4443 flags = BIT(0) | BIT(1);
4444 else
4445 flags = BIT(1);
4446
4447 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4448 rp->features[idx].flags = cpu_to_le32(flags);
4449 idx++;
4450 }
4451
4452 if (hdev && (aosp_has_quality_report(hdev) ||
4453 hdev->set_quality_report)) {
4454 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4455 flags = BIT(0);
4456 else
4457 flags = 0;
4458
4459 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4460 rp->features[idx].flags = cpu_to_le32(flags);
4461 idx++;
4462 }
4463
4464 if (hdev && hdev->get_data_path_id) {
4465 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4466 flags = BIT(0);
4467 else
4468 flags = 0;
4469
4470 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4471 rp->features[idx].flags = cpu_to_le32(flags);
4472 idx++;
4473 }
4474
4475 if (IS_ENABLED(CONFIG_BT_LE)) {
4476 flags = iso_enabled() ? BIT(0) : 0;
4477 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4478 rp->features[idx].flags = cpu_to_le32(flags);
4479 idx++;
4480 }
4481
4482 if (hdev && lmp_le_capable(hdev)) {
4483 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4484 flags = BIT(0);
4485 else
4486 flags = 0;
4487
4488 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4489 rp->features[idx].flags = cpu_to_le32(flags);
4490 idx++;
4491 }
4492
4493 rp->feature_count = cpu_to_le16(idx);
4494
4495 /* After reading the experimental features information, enable
4496 * the events to update the client on any future change.
4497 */
4498 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4499
4500 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4501 MGMT_OP_READ_EXP_FEATURES_INFO,
4502 0, rp, sizeof(*rp) + (20 * idx));
4503
4504 kfree(rp);
4505 return status;
4506}
4507
4508static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4509 struct sock *skip)
4510{
4511 struct mgmt_ev_exp_feature_changed ev;
4512
4513 memset(&ev, 0, sizeof(ev));
4514 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4515 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4516
4517 /* FIXME: check whether this conn_flags update needs to be atomic */
4518 if (enabled && privacy_mode_capable(hdev))
4519 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4520 else
4521 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4522
4523 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4524 &ev, sizeof(ev),
4525 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4527}
4528
4529static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4530 bool enabled, struct sock *skip)
4531{
4532 struct mgmt_ev_exp_feature_changed ev;
4533
4534 memset(&ev, 0, sizeof(ev));
4535 memcpy(ev.uuid, uuid, 16);
4536 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4537
4538 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4539 &ev, sizeof(ev),
4540 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4541}
4542
4543#define EXP_FEAT(_uuid, _set_func) \
4544{ \
4545 .uuid = _uuid, \
4546 .set_func = _set_func, \
4547}
4548
4549 /* The zero key UUID is special: it disables multiple exp features at once. */
4550static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4551 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4552{
4553 struct mgmt_rp_set_exp_feature rp;
4554
4555 memset(rp.uuid, 0, 16);
4556 rp.flags = cpu_to_le32(0);
4557
4558#ifdef CONFIG_BT_FEATURE_DEBUG
4559 if (!hdev) {
4560 bool changed = bt_dbg_get();
4561
4562 bt_dbg_set(false);
4563
4564 if (changed)
4565 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4566 }
4567#endif
4568
4569 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4570 bool changed;
4571
4572 changed = hci_dev_test_and_clear_flag(hdev,
4573 HCI_ENABLE_LL_PRIVACY);
4574 if (changed)
4575 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4576 sk);
4577 }
4578
4579 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4580
4581 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4582 MGMT_OP_SET_EXP_FEATURE, 0,
4583 &rp, sizeof(rp));
4584}
4585
4586#ifdef CONFIG_BT_FEATURE_DEBUG
4587static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4588 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4589{
4590 struct mgmt_rp_set_exp_feature rp;
4591
4592 bool val, changed;
4593 int err;
4594
4595 /* Command requires the non-controller index */
4596 if (hdev)
4597 return mgmt_cmd_status(sk, hdev->id,
4598 MGMT_OP_SET_EXP_FEATURE,
4599 MGMT_STATUS_INVALID_INDEX);
4600
4601 /* Parameters are limited to a single octet */
4602 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4603 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4604 MGMT_OP_SET_EXP_FEATURE,
4605 MGMT_STATUS_INVALID_PARAMS);
4606
4607 /* Only boolean on/off is supported */
4608 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4609 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4610 MGMT_OP_SET_EXP_FEATURE,
4611 MGMT_STATUS_INVALID_PARAMS);
4612
4613 val = !!cp->param[0];
4614 changed = val ? !bt_dbg_get() : bt_dbg_get();
4615 bt_dbg_set(val);
4616
4617 memcpy(rp.uuid, debug_uuid, 16);
4618 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4619
4620 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4621
4622 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4623 MGMT_OP_SET_EXP_FEATURE, 0,
4624 &rp, sizeof(rp));
4625
4626 if (changed)
4627 exp_feature_changed(hdev, debug_uuid, val, sk);
4628
4629 return err;
4630}
4631#endif
4632
4633static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4634 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4635{
4636 struct mgmt_rp_set_exp_feature rp;
4637 bool val, changed;
4638 int err;
4639
4640 /* Command requires the controller index */
4641 if (!hdev)
4642 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4643 MGMT_OP_SET_EXP_FEATURE,
4644 MGMT_STATUS_INVALID_INDEX);
4645
4646 /* Parameters are limited to a single octet */
4647 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4648 return mgmt_cmd_status(sk, hdev->id,
4649 MGMT_OP_SET_EXP_FEATURE,
4650 MGMT_STATUS_INVALID_PARAMS);
4651
4652 /* Only boolean on/off is supported */
4653 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4654 return mgmt_cmd_status(sk, hdev->id,
4655 MGMT_OP_SET_EXP_FEATURE,
4656 MGMT_STATUS_INVALID_PARAMS);
4657
4658 val = !!cp->param[0];
4659
4660 if (val) {
4661 changed = !hci_dev_test_and_set_flag(hdev,
4662 HCI_MESH_EXPERIMENTAL);
4663 } else {
4664 hci_dev_clear_flag(hdev, HCI_MESH);
4665 changed = hci_dev_test_and_clear_flag(hdev,
4666 HCI_MESH_EXPERIMENTAL);
4667 }
4668
4669 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4670 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4671
4672 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4673
4674 err = mgmt_cmd_complete(sk, hdev->id,
4675 MGMT_OP_SET_EXP_FEATURE, 0,
4676 &rp, sizeof(rp));
4677
4678 if (changed)
4679 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4680
4681 return err;
4682}
4683
4684static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4685 struct mgmt_cp_set_exp_feature *cp,
4686 u16 data_len)
4687{
4688 struct mgmt_rp_set_exp_feature rp;
4689 bool val, changed;
4690 int err;
4691 u32 flags;
4692
4693 /* Command requires the controller index */
4694 if (!hdev)
4695 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4696 MGMT_OP_SET_EXP_FEATURE,
4697 MGMT_STATUS_INVALID_INDEX);
4698
4699 /* Changes can only be made when controller is powered down */
4700 if (hdev_is_powered(hdev))
4701 return mgmt_cmd_status(sk, hdev->id,
4702 MGMT_OP_SET_EXP_FEATURE,
4703 MGMT_STATUS_REJECTED);
4704
4705 /* Parameters are limited to a single octet */
4706 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4707 return mgmt_cmd_status(sk, hdev->id,
4708 MGMT_OP_SET_EXP_FEATURE,
4709 MGMT_STATUS_INVALID_PARAMS);
4710
4711 /* Only boolean on/off is supported */
4712 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4713 return mgmt_cmd_status(sk, hdev->id,
4714 MGMT_OP_SET_EXP_FEATURE,
4715 MGMT_STATUS_INVALID_PARAMS);
4716
4717 val = !!cp->param[0];
4718
4719 if (val) {
4720 changed = !hci_dev_test_and_set_flag(hdev,
4721 HCI_ENABLE_LL_PRIVACY);
4722 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4723
4724 /* Enable LL privacy + supported settings changed */
4725 flags = BIT(0) | BIT(1);
4726 } else {
4727 changed = hci_dev_test_and_clear_flag(hdev,
4728 HCI_ENABLE_LL_PRIVACY);
4729
4730 /* Disable LL privacy + supported settings changed */
4731 flags = BIT(1);
4732 }
4733
4734 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4735 rp.flags = cpu_to_le32(flags);
4736
4737 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4738
4739 err = mgmt_cmd_complete(sk, hdev->id,
4740 MGMT_OP_SET_EXP_FEATURE, 0,
4741 &rp, sizeof(rp));
4742
4743 if (changed)
4744 exp_ll_privacy_feature_changed(val, hdev, sk);
4745
4746 return err;
4747}
4748
4749static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4750 struct mgmt_cp_set_exp_feature *cp,
4751 u16 data_len)
4752{
4753 struct mgmt_rp_set_exp_feature rp;
4754 bool val, changed;
4755 int err;
4756
4757 /* Command requires a valid controller index */
4758 if (!hdev)
4759 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4760 MGMT_OP_SET_EXP_FEATURE,
4761 MGMT_STATUS_INVALID_INDEX);
4762
4763 /* Parameters are limited to a single octet */
4764 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4765 return mgmt_cmd_status(sk, hdev->id,
4766 MGMT_OP_SET_EXP_FEATURE,
4767 MGMT_STATUS_INVALID_PARAMS);
4768
4769 /* Only boolean on/off is supported */
4770 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4771 return mgmt_cmd_status(sk, hdev->id,
4772 MGMT_OP_SET_EXP_FEATURE,
4773 MGMT_STATUS_INVALID_PARAMS);
4774
4775 hci_req_sync_lock(hdev);
4776
4777 val = !!cp->param[0];
4778 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4779
4780 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4781 err = mgmt_cmd_status(sk, hdev->id,
4782 MGMT_OP_SET_EXP_FEATURE,
4783 MGMT_STATUS_NOT_SUPPORTED);
4784 goto unlock_quality_report;
4785 }
4786
4787 if (changed) {
4788 if (hdev->set_quality_report)
4789 err = hdev->set_quality_report(hdev, val);
4790 else
4791 err = aosp_set_quality_report(hdev, val);
4792
4793 if (err) {
4794 err = mgmt_cmd_status(sk, hdev->id,
4795 MGMT_OP_SET_EXP_FEATURE,
4796 MGMT_STATUS_FAILED);
4797 goto unlock_quality_report;
4798 }
4799
4800 if (val)
4801 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4802 else
4803 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4804 }
4805
4806 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4807
4808 memcpy(rp.uuid, quality_report_uuid, 16);
4809 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4810 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4811
4812 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4813 &rp, sizeof(rp));
4814
4815 if (changed)
4816 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4817
4818unlock_quality_report:
4819 hci_req_sync_unlock(hdev);
4820 return err;
4821}
4822
4823static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4824 struct mgmt_cp_set_exp_feature *cp,
4825 u16 data_len)
4826{
4827 bool val, changed;
4828 int err;
4829 struct mgmt_rp_set_exp_feature rp;
4830
4831 /* Command requires a valid controller index */
4832 if (!hdev)
4833 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4834 MGMT_OP_SET_EXP_FEATURE,
4835 MGMT_STATUS_INVALID_INDEX);
4836
4837 /* Parameters are limited to a single octet */
4838 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4839 return mgmt_cmd_status(sk, hdev->id,
4840 MGMT_OP_SET_EXP_FEATURE,
4841 MGMT_STATUS_INVALID_PARAMS);
4842
4843 /* Only boolean on/off is supported */
4844 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4845 return mgmt_cmd_status(sk, hdev->id,
4846 MGMT_OP_SET_EXP_FEATURE,
4847 MGMT_STATUS_INVALID_PARAMS);
4848
4849 val = !!cp->param[0];
4850 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4851
4852 if (!hdev->get_data_path_id)
4853 return mgmt_cmd_status(sk, hdev->id,
4854 MGMT_OP_SET_EXP_FEATURE,
4855 MGMT_STATUS_NOT_SUPPORTED);
4857
4858 if (changed) {
4859 if (val)
4860 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4861 else
4862 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4863 }
4864
4865 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4866 val, changed);
4867
4868 memcpy(rp.uuid, offload_codecs_uuid, 16);
4869 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4870 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4871 err = mgmt_cmd_complete(sk, hdev->id,
4872 MGMT_OP_SET_EXP_FEATURE, 0,
4873 &rp, sizeof(rp));
4874
4875 if (changed)
4876 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4877
4878 return err;
4879}
4880
4881static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4882 struct mgmt_cp_set_exp_feature *cp,
4883 u16 data_len)
4884{
4885 bool val, changed;
4886 int err;
4887 struct mgmt_rp_set_exp_feature rp;
4888
4889 /* Command requires a valid controller index */
4890 if (!hdev)
4891 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4892 MGMT_OP_SET_EXP_FEATURE,
4893 MGMT_STATUS_INVALID_INDEX);
4894
4895 /* Parameters are limited to a single octet */
4896 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4897 return mgmt_cmd_status(sk, hdev->id,
4898 MGMT_OP_SET_EXP_FEATURE,
4899 MGMT_STATUS_INVALID_PARAMS);
4900
4901 /* Only boolean on/off is supported */
4902 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4903 return mgmt_cmd_status(sk, hdev->id,
4904 MGMT_OP_SET_EXP_FEATURE,
4905 MGMT_STATUS_INVALID_PARAMS);
4906
4907 val = !!cp->param[0];
4908 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4909
4910 if (!hci_dev_le_state_simultaneous(hdev))
4911 return mgmt_cmd_status(sk, hdev->id,
4912 MGMT_OP_SET_EXP_FEATURE,
4913 MGMT_STATUS_NOT_SUPPORTED);
4915
4916 if (changed) {
4917 if (val)
4918 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4919 else
4920 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4921 }
4922
4923 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4924 val, changed);
4925
4926 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4927 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4928 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4929 err = mgmt_cmd_complete(sk, hdev->id,
4930 MGMT_OP_SET_EXP_FEATURE, 0,
4931 &rp, sizeof(rp));
4932
4933 if (changed)
4934 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4935
4936 return err;
4937}
4938
4939#ifdef CONFIG_BT_LE
4940static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4941 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4942{
4943 struct mgmt_rp_set_exp_feature rp;
4944 bool val, changed = false;
4945 int err;
4946
4947 /* Command requires the non-controller index */
4948 if (hdev)
4949 return mgmt_cmd_status(sk, hdev->id,
4950 MGMT_OP_SET_EXP_FEATURE,
4951 MGMT_STATUS_INVALID_INDEX);
4952
4953 /* Parameters are limited to a single octet */
4954 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4955 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4956 MGMT_OP_SET_EXP_FEATURE,
4957 MGMT_STATUS_INVALID_PARAMS);
4958
4959 /* Only boolean on/off is supported */
4960 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4961 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4962 MGMT_OP_SET_EXP_FEATURE,
4963 MGMT_STATUS_INVALID_PARAMS);
4964
4965 val = !!cp->param[0];
4966 if (val)
4967 err = iso_init();
4968 else
4969 err = iso_exit();
4970
4971 if (!err)
4972 changed = true;
4973
4974 memcpy(rp.uuid, iso_socket_uuid, 16);
4975 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4976
4977 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4978
4979 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4980 MGMT_OP_SET_EXP_FEATURE, 0,
4981 &rp, sizeof(rp));
4982
4983 if (changed)
4984 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4985
4986 return err;
4987}
4988#endif
4989
4990static const struct mgmt_exp_feature {
4991 const u8 *uuid;
4992 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4993 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4994} exp_features[] = {
4995 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4996#ifdef CONFIG_BT_FEATURE_DEBUG
4997 EXP_FEAT(debug_uuid, set_debug_func),
4998#endif
4999 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5000 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5001 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5002 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5003 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5004#ifdef CONFIG_BT_LE
5005 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5006#endif
5007
5008 /* end with a null feature */
5009 EXP_FEAT(NULL, NULL)
5010};
5011
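/* Look up the experimental feature by UUID and hand the request to
 * its setter; unknown UUIDs are rejected as not supported.
 */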
5012static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5013 void *data, u16 data_len)
5014{
5015 struct mgmt_cp_set_exp_feature *cp = data;
5016 size_t i = 0;
5017
5018 bt_dev_dbg(hdev, "sock %p", sk);
5019
5020 for (i = 0; exp_features[i].uuid; i++) {
5021 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5022 return exp_features[i].set_func(sk, hdev, cp, data_len);
5023 }
5024
5025 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5026 MGMT_OP_SET_EXP_FEATURE,
5027 MGMT_STATUS_NOT_SUPPORTED);
5028}
5029
5030static u32 get_params_flags(struct hci_dev *hdev,
5031 struct hci_conn_params *params)
5032{
5033 u32 flags = hdev->conn_flags;
5034
5035 /* Devices using RPAs can only be programmed into the accept list
5036 * if LL Privacy has been enabled; otherwise they cannot set
5037 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5038 */
5039 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5040 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5041 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5042
5043 return flags;
5044}
5045
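/* Report the supported and current flags of a device on the BR/EDR
 * accept list or in the LE connection parameters.
 */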
5046static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5047 u16 data_len)
5048{
5049 struct mgmt_cp_get_device_flags *cp = data;
5050 struct mgmt_rp_get_device_flags rp;
5051 struct bdaddr_list_with_flags *br_params;
5052 struct hci_conn_params *params;
5053 u32 supported_flags;
5054 u32 current_flags = 0;
5055 u8 status = MGMT_STATUS_INVALID_PARAMS;
5056
5057 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5058 &cp->addr.bdaddr, cp->addr.type);
5059
5060 hci_dev_lock(hdev);
5061
5062 supported_flags = hdev->conn_flags;
5063
5064 memset(&rp, 0, sizeof(rp));
5065
5066 if (cp->addr.type == BDADDR_BREDR) {
5067 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5068 &cp->addr.bdaddr,
5069 cp->addr.type);
5070 if (!br_params)
5071 goto done;
5072
5073 current_flags = br_params->flags;
5074 } else {
5075 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5076 le_addr_type(cp->addr.type));
5077 if (!params)
5078 goto done;
5079
5080 supported_flags = get_params_flags(hdev, params);
5081 current_flags = params->flags;
5082 }
5083
5084 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5085 rp.addr.type = cp->addr.type;
5086 rp.supported_flags = cpu_to_le32(supported_flags);
5087 rp.current_flags = cpu_to_le32(current_flags);
5088
5089 status = MGMT_STATUS_SUCCESS;
5090
5091done:
5092 hci_dev_unlock(hdev);
5093
5094 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5095 &rp, sizeof(rp));
5096}
5097
5098static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5099 bdaddr_t *bdaddr, u8 bdaddr_type,
5100 u32 supported_flags, u32 current_flags)
5101{
5102 struct mgmt_ev_device_flags_changed ev;
5103
5104 bacpy(&ev.addr.bdaddr, bdaddr);
5105 ev.addr.type = bdaddr_type;
5106 ev.supported_flags = cpu_to_le32(supported_flags);
5107 ev.current_flags = cpu_to_le32(current_flags);
5108
5109 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5110}
5111
5112static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5113 u16 len)
5114{
5115 struct mgmt_cp_set_device_flags *cp = data;
5116 struct bdaddr_list_with_flags *br_params;
5117 struct hci_conn_params *params;
5118 u8 status = MGMT_STATUS_INVALID_PARAMS;
5119 u32 supported_flags;
5120 u32 current_flags = __le32_to_cpu(cp->current_flags);
5121
5122 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5123 &cp->addr.bdaddr, cp->addr.type, current_flags);
5124
5125 /* FIXME: take hci_dev_lock() earlier, since conn_flags can change */
5126 supported_flags = hdev->conn_flags;
5127
5128 if ((supported_flags | current_flags) != supported_flags) {
5129 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5130 current_flags, supported_flags);
5131 goto done;
5132 }
5133
5134 hci_dev_lock(hdev);
5135
5136 if (cp->addr.type == BDADDR_BREDR) {
5137 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5138 &cp->addr.bdaddr,
5139 cp->addr.type);
5140
5141 if (br_params) {
5142 br_params->flags = current_flags;
5143 status = MGMT_STATUS_SUCCESS;
5144 } else {
5145 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5146 &cp->addr.bdaddr, cp->addr.type);
5147 }
5148
5149 goto unlock;
5150 }
5151
5152 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5153 le_addr_type(cp->addr.type));
5154 if (!params) {
5155 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5156 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5157 goto unlock;
5158 }
5159
5160 supported_flags = get_params_flags(hdev, params);
5161
5162 if ((supported_flags | current_flags) != supported_flags) {
5163 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5164 current_flags, supported_flags);
5165 goto unlock;
5166 }
5167
5168 WRITE_ONCE(params->flags, current_flags);
5169 status = MGMT_STATUS_SUCCESS;
5170
5171 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5172 * has been set.
5173 */
5174 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5175 hci_update_passive_scan(hdev);
5176
5177unlock:
5178 hci_dev_unlock(hdev);
5179
5180done:
5181 if (status == MGMT_STATUS_SUCCESS)
5182 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5183 supported_flags, current_flags);
5184
5185 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5186 &cp->addr, sizeof(cp->addr));
5187}
5188
5189static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5190 u16 handle)
5191{
5192 struct mgmt_ev_adv_monitor_added ev;
5193
5194 ev.monitor_handle = cpu_to_le16(handle);
5195
5196 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5197}
5198
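/* Send the Advertisement Monitor Removed event, skipping the socket
 * that issued a pending Remove Adv Monitor command for a specific
 * handle, since it receives a command response instead.
 */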
5199void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5200{
5201 struct mgmt_ev_adv_monitor_removed ev;
5202 struct mgmt_pending_cmd *cmd;
5203 struct sock *sk_skip = NULL;
5204 struct mgmt_cp_remove_adv_monitor *cp;
5205
5206 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5207 if (cmd) {
5208 cp = cmd->param;
5209
5210 if (cp->monitor_handle)
5211 sk_skip = cmd->sk;
5212 }
5213
5214 ev.monitor_handle = cpu_to_le16(handle);
5215
5216 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5217}
5218
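/* Report the supported/enabled monitor features together with the
 * handles of all registered advertisement monitors.
 */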
5219static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5220 void *data, u16 len)
5221{
5222 struct adv_monitor *monitor = NULL;
5223 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5224 int handle, err;
5225 size_t rp_size = 0;
5226 __u32 supported = 0;
5227 __u32 enabled = 0;
5228 __u16 num_handles = 0;
5229 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5230
5231 BT_DBG("request for %s", hdev->name);
5232
5233 hci_dev_lock(hdev);
5234
5235 if (msft_monitor_supported(hdev))
5236 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5237
5238 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5239 handles[num_handles++] = monitor->handle;
5240
5241 hci_dev_unlock(hdev);
5242
5243 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5244 rp = kmalloc(rp_size, GFP_KERNEL);
5245 if (!rp)
5246 return -ENOMEM;
5247
5248 /* All supported features are currently enabled */
5249 enabled = supported;
5250
5251 rp->supported_features = cpu_to_le32(supported);
5252 rp->enabled_features = cpu_to_le32(enabled);
5253 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5254 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5255 rp->num_handles = cpu_to_le16(num_handles);
5256 if (num_handles)
5257 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5258
5259 err = mgmt_cmd_complete(sk, hdev->id,
5260 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5261 MGMT_STATUS_SUCCESS, rp, rp_size);
5262
5263 kfree(rp);
5264
5265 return err;
5266}
5267
5268static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5269 void *data, int status)
5270{
5271 struct mgmt_rp_add_adv_patterns_monitor rp;
5272 struct mgmt_pending_cmd *cmd = data;
5273 struct adv_monitor *monitor = cmd->user_data;
5274
5275 hci_dev_lock(hdev);
5276
5277 rp.monitor_handle = cpu_to_le16(monitor->handle);
5278
5279 if (!status) {
5280 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5281 hdev->adv_monitors_cnt++;
5282 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5283 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5284 hci_update_passive_scan(hdev);
5285 }
5286
5287 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5288 mgmt_status(status), &rp, sizeof(rp));
5289 mgmt_pending_remove(cmd);
5290
5291 hci_dev_unlock(hdev);
5292 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5293 rp.monitor_handle, status);
5294}
5295
5296static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5297{
5298 struct mgmt_pending_cmd *cmd = data;
5299 struct adv_monitor *monitor = cmd->user_data;
5300
5301 return hci_add_adv_monitor(hdev, monitor);
5302}
5303
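/* Common tail for the Add Adv Patterns Monitor variants: reject the
 * request while conflicting operations are pending, otherwise queue
 * the monitor registration and respond from its completion handler.
 */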
5304static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5305 struct adv_monitor *m, u8 status,
5306 void *data, u16 len, u16 op)
5307{
5308 struct mgmt_pending_cmd *cmd;
5309 int err;
5310
5311 hci_dev_lock(hdev);
5312
5313 if (status)
5314 goto unlock;
5315
5316 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5317 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5318 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5319 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5320 status = MGMT_STATUS_BUSY;
5321 goto unlock;
5322 }
5323
5324 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5325 if (!cmd) {
5326 status = MGMT_STATUS_NO_RESOURCES;
5327 goto unlock;
5328 }
5329
5330 cmd->user_data = m;
5331 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5332 mgmt_add_adv_patterns_monitor_complete);
5333 if (err) {
5334 if (err == -ENOMEM)
5335 status = MGMT_STATUS_NO_RESOURCES;
5336 else
5337 status = MGMT_STATUS_FAILED;
5338
5339 goto unlock;
5340 }
5341
5342 hci_dev_unlock(hdev);
5343
5344 return 0;
5345
5346unlock:
5347 hci_free_adv_monitor(hdev, m);
5348 hci_dev_unlock(hdev);
5349 return mgmt_cmd_status(sk, hdev->id, op, status);
5350}
5351
5352static void parse_adv_monitor_rssi(struct adv_monitor *m,
5353 struct mgmt_adv_rssi_thresholds *rssi)
5354{
5355 if (rssi) {
5356 m->rssi.low_threshold = rssi->low_threshold;
5357 m->rssi.low_threshold_timeout =
5358 __le16_to_cpu(rssi->low_threshold_timeout);
5359 m->rssi.high_threshold = rssi->high_threshold;
5360 m->rssi.high_threshold_timeout =
5361 __le16_to_cpu(rssi->high_threshold_timeout);
5362 m->rssi.sampling_period = rssi->sampling_period;
5363 } else {
5364 /* Default values. These are the least restrictive parameters
5365 * the MSFT API requires to work, so monitoring behaves as if
5366 * there were no RSSI parameters to consider. They may need to
5367 * be changed if other APIs are to be supported.
5368 */
5369 m->rssi.low_threshold = -127;
5370 m->rssi.low_threshold_timeout = 60;
5371 m->rssi.high_threshold = -127;
5372 m->rssi.high_threshold_timeout = 0;
5373 m->rssi.sampling_period = 0;
5374 }
5375}
5376
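/* Copy the advertisement patterns into the monitor, validating that
 * each pattern fits within the maximum extended advertising data
 * length.
 */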
5377static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5378 struct mgmt_adv_pattern *patterns)
5379{
5380 u8 offset = 0, length = 0;
5381 struct adv_pattern *p = NULL;
5382 int i;
5383
5384 for (i = 0; i < pattern_count; i++) {
5385 offset = patterns[i].offset;
5386 length = patterns[i].length;
5387 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5388 length > HCI_MAX_EXT_AD_LENGTH ||
5389 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5390 return MGMT_STATUS_INVALID_PARAMS;
5391
5392 p = kmalloc(sizeof(*p), GFP_KERNEL);
5393 if (!p)
5394 return MGMT_STATUS_NO_RESOURCES;
5395
5396 p->ad_type = patterns[i].ad_type;
5397 p->offset = patterns[i].offset;
5398 p->length = patterns[i].length;
5399 memcpy(p->value, patterns[i].value, p->length);
5400
5401 INIT_LIST_HEAD(&p->list);
5402 list_add(&p->list, &m->patterns);
5403 }
5404
5405 return MGMT_STATUS_SUCCESS;
5406}
5407
5408static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5409 void *data, u16 len)
5410{
5411 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5412 struct adv_monitor *m = NULL;
5413 u8 status = MGMT_STATUS_SUCCESS;
5414 size_t expected_size = sizeof(*cp);
5415
5416 BT_DBG("request for %s", hdev->name);
5417
5418 if (len <= sizeof(*cp)) {
5419 status = MGMT_STATUS_INVALID_PARAMS;
5420 goto done;
5421 }
5422
5423 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5424 if (len != expected_size) {
5425 status = MGMT_STATUS_INVALID_PARAMS;
5426 goto done;
5427 }
5428
5429 m = kzalloc(sizeof(*m), GFP_KERNEL);
5430 if (!m) {
5431 status = MGMT_STATUS_NO_RESOURCES;
5432 goto done;
5433 }
5434
5435 INIT_LIST_HEAD(&m->patterns);
5436
5437 parse_adv_monitor_rssi(m, NULL);
5438 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5439
5440done:
5441 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5442 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5443}
5444
5445static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5446 void *data, u16 len)
5447{
5448 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5449 struct adv_monitor *m = NULL;
5450 u8 status = MGMT_STATUS_SUCCESS;
5451 size_t expected_size = sizeof(*cp);
5452
5453 BT_DBG("request for %s", hdev->name);
5454
5455 if (len <= sizeof(*cp)) {
5456 status = MGMT_STATUS_INVALID_PARAMS;
5457 goto done;
5458 }
5459
5460 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5461 if (len != expected_size) {
5462 status = MGMT_STATUS_INVALID_PARAMS;
5463 goto done;
5464 }
5465
5466 m = kzalloc(sizeof(*m), GFP_KERNEL);
5467 if (!m) {
5468 status = MGMT_STATUS_NO_RESOURCES;
5469 goto done;
5470 }
5471
5472 INIT_LIST_HEAD(&m->patterns);
5473
5474 parse_adv_monitor_rssi(m, &cp->rssi);
5475 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5476
5477done:
5478 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5479 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5480}
5481
5482static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5483 void *data, int status)
5484{
5485 struct mgmt_rp_remove_adv_monitor rp;
5486 struct mgmt_pending_cmd *cmd = data;
5487 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5488
5489 hci_dev_lock(hdev);
5490
5491 rp.monitor_handle = cp->monitor_handle;
5492
5493 if (!status)
5494 hci_update_passive_scan(hdev);
5495
5496 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5497 mgmt_status(status), &rp, sizeof(rp));
5498 mgmt_pending_remove(cmd);
5499
5500 hci_dev_unlock(hdev);
5501 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5502 rp.monitor_handle, status);
5503}
5504
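/* A monitor handle of zero means that all registered monitors should
 * be removed.
 */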
5505static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5506{
5507 struct mgmt_pending_cmd *cmd = data;
5508 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5509 u16 handle = __le16_to_cpu(cp->monitor_handle);
5510
5511 if (!handle)
5512 return hci_remove_all_adv_monitor(hdev);
5513
5514 return hci_remove_single_adv_monitor(hdev, handle);
5515}
5516
5517static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5518 void *data, u16 len)
5519{
5520 struct mgmt_pending_cmd *cmd;
5521 int err, status;
5522
5523 hci_dev_lock(hdev);
5524
5525 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5526 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5527 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5528 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5529 status = MGMT_STATUS_BUSY;
5530 goto unlock;
5531 }
5532
5533 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5534 if (!cmd) {
5535 status = MGMT_STATUS_NO_RESOURCES;
5536 goto unlock;
5537 }
5538
5539 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5540 mgmt_remove_adv_monitor_complete);
5541
5542 if (err) {
5543 mgmt_pending_remove(cmd);
5544
5545 if (err == -ENOMEM)
5546 status = MGMT_STATUS_NO_RESOURCES;
5547 else
5548 status = MGMT_STATUS_FAILED;
5549
5550 goto unlock;
5551 }
5552
5553 hci_dev_unlock(hdev);
5554
5555 return 0;
5556
5557unlock:
5558 hci_dev_unlock(hdev);
5559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5560 status);
5561}
5562
5563static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5564{
5565 struct mgmt_rp_read_local_oob_data mgmt_rp;
5566 size_t rp_size = sizeof(mgmt_rp);
5567 struct mgmt_pending_cmd *cmd = data;
5568 struct sk_buff *skb = cmd->skb;
5569 u8 status = mgmt_status(err);
5570
5571 if (!status) {
5572 if (!skb)
5573 status = MGMT_STATUS_FAILED;
5574 else if (IS_ERR(skb))
5575 status = mgmt_status(PTR_ERR(skb));
5576 else
5577 status = mgmt_status(skb->data[0]);
5578 }
5579
5580 bt_dev_dbg(hdev, "status %d", status);
5581
5582 if (status) {
5583 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5584 goto remove;
5585 }
5586
5587 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5588
5589 if (!bredr_sc_enabled(hdev)) {
5590 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5591
5592 if (skb->len < sizeof(*rp)) {
5593 mgmt_cmd_status(cmd->sk, hdev->id,
5594 MGMT_OP_READ_LOCAL_OOB_DATA,
5595 MGMT_STATUS_FAILED);
5596 goto remove;
5597 }
5598
5599 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5600 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5601
5602 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5603 } else {
5604 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5605
5606 if (skb->len < sizeof(*rp)) {
5607 mgmt_cmd_status(cmd->sk, hdev->id,
5608 MGMT_OP_READ_LOCAL_OOB_DATA,
5609 MGMT_STATUS_FAILED);
5610 goto remove;
5611 }
5612
5613 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5614 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5615
5616 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5617 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5618 }
5619
5620 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5621 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5622
5623remove:
5624 if (skb && !IS_ERR(skb))
5625 kfree_skb(skb);
5626
5627 mgmt_pending_free(cmd);
5628}
5629
5630static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5631{
5632 struct mgmt_pending_cmd *cmd = data;
5633
5634 cmd->skb = hci_read_local_oob_data_sync(hdev, bredr_sc_enabled(hdev),
5635 cmd->sk);
5638
5639 if (IS_ERR(cmd->skb))
5640 return PTR_ERR(cmd->skb);
5641 else
5642 return 0;
5643}
5644
5645static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5646 void *data, u16 data_len)
5647{
5648 struct mgmt_pending_cmd *cmd;
5649 int err;
5650
5651 bt_dev_dbg(hdev, "sock %p", sk);
5652
5653 hci_dev_lock(hdev);
5654
5655 if (!hdev_is_powered(hdev)) {
5656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5657 MGMT_STATUS_NOT_POWERED);
5658 goto unlock;
5659 }
5660
5661 if (!lmp_ssp_capable(hdev)) {
5662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5663 MGMT_STATUS_NOT_SUPPORTED);
5664 goto unlock;
5665 }
5666
5667 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5668 if (!cmd)
5669 err = -ENOMEM;
5670 else
5671 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5672 read_local_oob_data_complete);
5673
5674 if (err < 0) {
5675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5676 MGMT_STATUS_FAILED);
5677
5678 if (cmd)
5679 mgmt_pending_free(cmd);
5680 }
5681
5682unlock:
5683 hci_dev_unlock(hdev);
5684 return err;
5685}
5686
5687static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5688 void *data, u16 len)
5689{
5690 struct mgmt_addr_info *addr = data;
5691 int err;
5692
5693 bt_dev_dbg(hdev, "sock %p", sk);
5694
5695 if (!bdaddr_type_is_valid(addr->type))
5696 return mgmt_cmd_complete(sk, hdev->id,
5697 MGMT_OP_ADD_REMOTE_OOB_DATA,
5698 MGMT_STATUS_INVALID_PARAMS,
5699 addr, sizeof(*addr));
5700
5701 hci_dev_lock(hdev);
5702
5703 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5704 struct mgmt_cp_add_remote_oob_data *cp = data;
5705 u8 status;
5706
5707 if (cp->addr.type != BDADDR_BREDR) {
5708 err = mgmt_cmd_complete(sk, hdev->id,
5709 MGMT_OP_ADD_REMOTE_OOB_DATA,
5710 MGMT_STATUS_INVALID_PARAMS,
5711 &cp->addr, sizeof(cp->addr));
5712 goto unlock;
5713 }
5714
5715 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5716 cp->addr.type, cp->hash,
5717 cp->rand, NULL, NULL);
5718 if (err < 0)
5719 status = MGMT_STATUS_FAILED;
5720 else
5721 status = MGMT_STATUS_SUCCESS;
5722
5723 err = mgmt_cmd_complete(sk, hdev->id,
5724 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5725 &cp->addr, sizeof(cp->addr));
5726 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5727 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5728 u8 *rand192, *hash192, *rand256, *hash256;
5729 u8 status;
5730
5731 if (bdaddr_type_is_le(cp->addr.type)) {
5732 /* Enforce zero-valued 192-bit parameters as
5733 * long as legacy SMP OOB isn't implemented.
5734 */
5735 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5736 memcmp(cp->hash192, ZERO_KEY, 16)) {
5737 err = mgmt_cmd_complete(sk, hdev->id,
5738 MGMT_OP_ADD_REMOTE_OOB_DATA,
5739 MGMT_STATUS_INVALID_PARAMS,
5740 addr, sizeof(*addr));
5741 goto unlock;
5742 }
5743
5744 rand192 = NULL;
5745 hash192 = NULL;
5746 } else {
5747 /* In case one of the P-192 values is set to zero,
5748 * then just disable OOB data for P-192.
5749 */
5750 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5751 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5752 rand192 = NULL;
5753 hash192 = NULL;
5754 } else {
5755 rand192 = cp->rand192;
5756 hash192 = cp->hash192;
5757 }
5758 }
5759
5760 /* In case one of the P-256 values is set to zero, then just
5761 * disable OOB data for P-256.
5762 */
5763 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5764 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5765 rand256 = NULL;
5766 hash256 = NULL;
5767 } else {
5768 rand256 = cp->rand256;
5769 hash256 = cp->hash256;
5770 }
5771
5772 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5773 cp->addr.type, hash192, rand192,
5774 hash256, rand256);
5775 if (err < 0)
5776 status = MGMT_STATUS_FAILED;
5777 else
5778 status = MGMT_STATUS_SUCCESS;
5779
5780 err = mgmt_cmd_complete(sk, hdev->id,
5781 MGMT_OP_ADD_REMOTE_OOB_DATA,
5782 status, &cp->addr, sizeof(cp->addr));
5783 } else {
5784 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5785 len);
5786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5787 MGMT_STATUS_INVALID_PARAMS);
5788 }
5789
5790unlock:
5791 hci_dev_unlock(hdev);
5792 return err;
5793}
5794
5795static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5796 void *data, u16 len)
5797{
5798 struct mgmt_cp_remove_remote_oob_data *cp = data;
5799 u8 status;
5800 int err;
5801
5802 bt_dev_dbg(hdev, "sock %p", sk);
5803
5804 if (cp->addr.type != BDADDR_BREDR)
5805 return mgmt_cmd_complete(sk, hdev->id,
5806 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5807 MGMT_STATUS_INVALID_PARAMS,
5808 &cp->addr, sizeof(cp->addr));
5809
5810 hci_dev_lock(hdev);
5811
5812 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5813 hci_remote_oob_data_clear(hdev);
5814 status = MGMT_STATUS_SUCCESS;
5815 goto done;
5816 }
5817
5818 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5819 if (err < 0)
5820 status = MGMT_STATUS_INVALID_PARAMS;
5821 else
5822 status = MGMT_STATUS_SUCCESS;
5823
5824done:
5825 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5826 status, &cp->addr, sizeof(cp->addr));
5827
5828 hci_dev_unlock(hdev);
5829 return err;
5830}
5831
5832void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5833{
5834 struct mgmt_pending_cmd *cmd;
5835
5836 bt_dev_dbg(hdev, "status %u", status);
5837
5838 hci_dev_lock(hdev);
5839
5840 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5841 if (!cmd)
5842 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5843
5844 if (!cmd)
5845 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5846
5847 if (cmd) {
5848 cmd->cmd_complete(cmd, mgmt_status(status));
5849 mgmt_pending_remove(cmd);
5850 }
5851
5852 hci_dev_unlock(hdev);
5853}
5854
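/* Check that the controller supports the requested discovery type and
 * report the failure reason through mgmt_status.
 */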
5855static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5856 uint8_t *mgmt_status)
5857{
5858 switch (type) {
5859 case DISCOV_TYPE_LE:
5860 *mgmt_status = mgmt_le_support(hdev);
5861 if (*mgmt_status)
5862 return false;
5863 break;
5864 case DISCOV_TYPE_INTERLEAVED:
5865 *mgmt_status = mgmt_le_support(hdev);
5866 if (*mgmt_status)
5867 return false;
5868 fallthrough;
5869 case DISCOV_TYPE_BREDR:
5870 *mgmt_status = mgmt_bredr_support(hdev);
5871 if (*mgmt_status)
5872 return false;
5873 break;
5874 default:
5875 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5876 return false;
5877 }
5878
5879 return true;
5880}
5881
5882static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5883{
5884 struct mgmt_pending_cmd *cmd = data;
5885
5886 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5887 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5888 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5889 return;
5890
5891 bt_dev_dbg(hdev, "err %d", err);
5892
5893 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5894 cmd->param, 1);
5895 mgmt_pending_remove(cmd);
5896
5897 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5898 DISCOVERY_FINDING);
5899}
5900
5901static int start_discovery_sync(struct hci_dev *hdev, void *data)
5902{
5903 return hci_start_discovery_sync(hdev);
5904}
5905
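/* Common implementation for the Start Discovery variants: validate
 * the state and discovery type, reset the discovery filter and queue
 * the actual discovery start.
 */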
5906static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5907 u16 op, void *data, u16 len)
5908{
5909 struct mgmt_cp_start_discovery *cp = data;
5910 struct mgmt_pending_cmd *cmd;
5911 u8 status;
5912 int err;
5913
5914 bt_dev_dbg(hdev, "sock %p", sk);
5915
5916 hci_dev_lock(hdev);
5917
5918 if (!hdev_is_powered(hdev)) {
5919 err = mgmt_cmd_complete(sk, hdev->id, op,
5920 MGMT_STATUS_NOT_POWERED,
5921 &cp->type, sizeof(cp->type));
5922 goto failed;
5923 }
5924
5925 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5926 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5927 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5928 &cp->type, sizeof(cp->type));
5929 goto failed;
5930 }
5931
5932 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5933 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5934 &cp->type, sizeof(cp->type));
5935 goto failed;
5936 }
5937
5938 /* Can't start discovery when it is paused */
5939 if (hdev->discovery_paused) {
5940 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5941 &cp->type, sizeof(cp->type));
5942 goto failed;
5943 }
5944
5945 /* Clear the discovery filter first to free any previously
5946 * allocated memory for the UUID list.
5947 */
5948 hci_discovery_filter_clear(hdev);
5949
5950 hdev->discovery.type = cp->type;
5951 hdev->discovery.report_invalid_rssi = false;
5952 hdev->discovery.limited = (op == MGMT_OP_START_LIMITED_DISCOVERY);
5956
5957 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5958 if (!cmd) {
5959 err = -ENOMEM;
5960 goto failed;
5961 }
5962
5963 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5964 start_discovery_complete);
5965 if (err < 0) {
5966 mgmt_pending_remove(cmd);
5967 goto failed;
5968 }
5969
5970 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5971
5972failed:
5973 hci_dev_unlock(hdev);
5974 return err;
5975}
5976
5977static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5978 void *data, u16 len)
5979{
5980 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5981 data, len);
5982}
5983
5984static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5985 void *data, u16 len)
5986{
5987 return start_discovery_internal(sk, hdev,
5988 MGMT_OP_START_LIMITED_DISCOVERY,
5989 data, len);
5990}
5991
5992static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5993 void *data, u16 len)
5994{
5995 struct mgmt_cp_start_service_discovery *cp = data;
5996 struct mgmt_pending_cmd *cmd;
5997 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5998 u16 uuid_count, expected_len;
5999 u8 status;
6000 int err;
6001
6002 bt_dev_dbg(hdev, "sock %p", sk);
6003
6004 hci_dev_lock(hdev);
6005
6006 if (!hdev_is_powered(hdev)) {
6007 err = mgmt_cmd_complete(sk, hdev->id,
6008 MGMT_OP_START_SERVICE_DISCOVERY,
6009 MGMT_STATUS_NOT_POWERED,
6010 &cp->type, sizeof(cp->type));
6011 goto failed;
6012 }
6013
6014 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6015 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6016 err = mgmt_cmd_complete(sk, hdev->id,
6017 MGMT_OP_START_SERVICE_DISCOVERY,
6018 MGMT_STATUS_BUSY, &cp->type,
6019 sizeof(cp->type));
6020 goto failed;
6021 }
6022
6023 if (hdev->discovery_paused) {
6024 err = mgmt_cmd_complete(sk, hdev->id,
6025 MGMT_OP_START_SERVICE_DISCOVERY,
6026 MGMT_STATUS_BUSY, &cp->type,
6027 sizeof(cp->type));
6028 goto failed;
6029 }
6030
6031 uuid_count = __le16_to_cpu(cp->uuid_count);
6032 if (uuid_count > max_uuid_count) {
6033 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6034 uuid_count);
6035 err = mgmt_cmd_complete(sk, hdev->id,
6036 MGMT_OP_START_SERVICE_DISCOVERY,
6037 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6038 sizeof(cp->type));
6039 goto failed;
6040 }
6041
6042 expected_len = sizeof(*cp) + uuid_count * 16;
6043 if (expected_len != len) {
6044 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6045 expected_len, len);
6046 err = mgmt_cmd_complete(sk, hdev->id,
6047 MGMT_OP_START_SERVICE_DISCOVERY,
6048 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6049 sizeof(cp->type));
6050 goto failed;
6051 }
6052
6053 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6054 err = mgmt_cmd_complete(sk, hdev->id,
6055 MGMT_OP_START_SERVICE_DISCOVERY,
6056 status, &cp->type, sizeof(cp->type));
6057 goto failed;
6058 }
6059
6060 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6061 hdev, data, len);
6062 if (!cmd) {
6063 err = -ENOMEM;
6064 goto failed;
6065 }
6066
6067 /* Clear the discovery filter first to free any previously
6068 * allocated memory for the UUID list.
6069 */
6070 hci_discovery_filter_clear(hdev);
6071
6072 hdev->discovery.result_filtering = true;
6073 hdev->discovery.type = cp->type;
6074 hdev->discovery.rssi = cp->rssi;
6075 hdev->discovery.uuid_count = uuid_count;
6076
6077 if (uuid_count > 0) {
6078 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6079 GFP_KERNEL);
6080 if (!hdev->discovery.uuids) {
6081 err = mgmt_cmd_complete(sk, hdev->id,
6082 MGMT_OP_START_SERVICE_DISCOVERY,
6083 MGMT_STATUS_FAILED,
6084 &cp->type, sizeof(cp->type));
6085 mgmt_pending_remove(cmd);
6086 goto failed;
6087 }
6088 }
6089
6090 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6091 start_discovery_complete);
6092 if (err < 0) {
6093 mgmt_pending_remove(cmd);
6094 goto failed;
6095 }
6096
6097 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6098
6099failed:
6100 hci_dev_unlock(hdev);
6101 return err;
6102}
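
/*
 * Illustrative note on the validation above (an assumption about the
 * caller, not new behaviour): every filter UUID occupies exactly 16
 * bytes (128-bit, little-endian), so the only parameter length the
 * handler accepts is
 *
 *	len = sizeof(struct mgmt_cp_start_service_discovery)
 *		+ uuid_count * 16;
 *
 * e.g. a request with two UUIDs carries a 32-byte tail, and any other
 * size is rejected with MGMT_STATUS_INVALID_PARAMS.
 */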
6103
6104void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6105{
6106 struct mgmt_pending_cmd *cmd;
6107
6108 bt_dev_dbg(hdev, "status %u", status);
6109
6110 hci_dev_lock(hdev);
6111
6112 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6113 if (cmd) {
6114 cmd->cmd_complete(cmd, mgmt_status(status));
6115 mgmt_pending_remove(cmd);
6116 }
6117
6118 hci_dev_unlock(hdev);
6119}
6120
6121static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6122{
6123 struct mgmt_pending_cmd *cmd = data;
6124
6125 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6126 return;
6127
6128 bt_dev_dbg(hdev, "err %d", err);
6129
6130 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6131 cmd->param, 1);
6132 mgmt_pending_remove(cmd);
6133
6134 if (!err)
6135 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6136}
6137
6138static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6139{
6140 return hci_stop_discovery_sync(hdev);
6141}
6142
6143static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6144 u16 len)
6145{
6146 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6147 struct mgmt_pending_cmd *cmd;
6148 int err;
6149
6150 bt_dev_dbg(hdev, "sock %p", sk);
6151
6152 hci_dev_lock(hdev);
6153
6154 if (!hci_discovery_active(hdev)) {
6155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6156 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6157 sizeof(mgmt_cp->type));
6158 goto unlock;
6159 }
6160
6161 if (hdev->discovery.type != mgmt_cp->type) {
6162 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6163 MGMT_STATUS_INVALID_PARAMS,
6164 &mgmt_cp->type, sizeof(mgmt_cp->type));
6165 goto unlock;
6166 }
6167
6168 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6169 if (!cmd) {
6170 err = -ENOMEM;
6171 goto unlock;
6172 }
6173
6174 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6175 stop_discovery_complete);
6176 if (err < 0) {
6177 mgmt_pending_remove(cmd);
6178 goto unlock;
6179 }
6180
6181 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6182
6183unlock:
6184 hci_dev_unlock(hdev);
6185 return err;
6186}
6187
6188static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6189 u16 len)
6190{
6191 struct mgmt_cp_confirm_name *cp = data;
6192 struct inquiry_entry *e;
6193 int err;
6194
6195 bt_dev_dbg(hdev, "sock %p", sk);
6196
6197 hci_dev_lock(hdev);
6198
6199 if (!hci_discovery_active(hdev)) {
6200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6201 MGMT_STATUS_FAILED, &cp->addr,
6202 sizeof(cp->addr));
6203 goto failed;
6204 }
6205
6206 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6207 if (!e) {
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6209 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6210 sizeof(cp->addr));
6211 goto failed;
6212 }
6213
6214 if (cp->name_known) {
6215 e->name_state = NAME_KNOWN;
6216 list_del(&e->list);
6217 } else {
6218 e->name_state = NAME_NEEDED;
6219 hci_inquiry_cache_update_resolve(hdev, e);
6220 }
6221
6222 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6223 &cp->addr, sizeof(cp->addr));
6224
6225failed:
6226 hci_dev_unlock(hdev);
6227 return err;
6228}
6229
6230static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6231 u16 len)
6232{
6233 struct mgmt_cp_block_device *cp = data;
6234 u8 status;
6235 int err;
6236
6237 bt_dev_dbg(hdev, "sock %p", sk);
6238
6239 if (!bdaddr_type_is_valid(cp->addr.type))
6240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6241 MGMT_STATUS_INVALID_PARAMS,
6242 &cp->addr, sizeof(cp->addr));
6243
6244 hci_dev_lock(hdev);
6245
6246 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6247 cp->addr.type);
6248 if (err < 0) {
6249 status = MGMT_STATUS_FAILED;
6250 goto done;
6251 }
6252
6253 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6254 sk);
6255 status = MGMT_STATUS_SUCCESS;
6256
6257done:
6258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6259 &cp->addr, sizeof(cp->addr));
6260
6261 hci_dev_unlock(hdev);
6262
6263 return err;
6264}
6265
6266static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6267 u16 len)
6268{
6269 struct mgmt_cp_unblock_device *cp = data;
6270 u8 status;
6271 int err;
6272
6273 bt_dev_dbg(hdev, "sock %p", sk);
6274
6275 if (!bdaddr_type_is_valid(cp->addr.type))
6276 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6277 MGMT_STATUS_INVALID_PARAMS,
6278 &cp->addr, sizeof(cp->addr));
6279
6280 hci_dev_lock(hdev);
6281
6282 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6283 cp->addr.type);
6284 if (err < 0) {
6285 status = MGMT_STATUS_INVALID_PARAMS;
6286 goto done;
6287 }
6288
6289 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6290 sk);
6291 status = MGMT_STATUS_SUCCESS;
6292
6293done:
6294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6295 &cp->addr, sizeof(cp->addr));
6296
6297 hci_dev_unlock(hdev);
6298
6299 return err;
6300}
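
/*
 * Illustrative note: Block Device and Unblock Device only edit
 * hdev->reject_list; neither handler tears down an existing
 * connection by itself. Both broadcast the matching
 * MGMT_EV_DEVICE_BLOCKED/UNBLOCKED event, passing the issuing socket
 * as the skip_sk argument of mgmt_event() so the caller does not
 * receive a copy of its own event.
 */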
6301
6302static int set_device_id_sync(struct hci_dev *hdev, void *data)
6303{
6304 return hci_update_eir_sync(hdev);
6305}
6306
6307static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6308 u16 len)
6309{
6310 struct mgmt_cp_set_device_id *cp = data;
6311 int err;
6312 __u16 source;
6313
6314 bt_dev_dbg(hdev, "sock %p", sk);
6315
6316 source = __le16_to_cpu(cp->source);
6317
6318 if (source > 0x0002)
6319 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6320 MGMT_STATUS_INVALID_PARAMS);
6321
6322 hci_dev_lock(hdev);
6323
6324 hdev->devid_source = source;
6325 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6326 hdev->devid_product = __le16_to_cpu(cp->product);
6327 hdev->devid_version = __le16_to_cpu(cp->version);
6328
6329 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6330 NULL, 0);
6331
6332 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6333
6334 hci_dev_unlock(hdev);
6335
6336 return err;
6337}
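
/*
 * Illustrative example (source values as defined by the Device ID
 * profile; the handler above only range-checks them): 0x0000 disables
 * the Device ID record, 0x0001 means a Bluetooth SIG assigned vendor
 * ID and 0x0002 a USB Implementers Forum assigned one. A caller with
 * a hypothetical SIG-assigned vendor ID would send:
 *
 *	struct mgmt_cp_set_device_id cp = {
 *		.source  = cpu_to_le16(0x0001),	// Bluetooth SIG
 *		.vendor  = cpu_to_le16(0x1234),	// hypothetical vendor ID
 *		.product = cpu_to_le16(0x0001),
 *		.version = cpu_to_le16(0x0100),	// 1.0
 *	};
 */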
6338
6339static void enable_advertising_instance(struct hci_dev *hdev, int err)
6340{
6341 if (err)
6342 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6343 else
6344 bt_dev_dbg(hdev, "status %d", err);
6345}
6346
6347static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6348{
6349 struct cmd_lookup match = { NULL, hdev };
6350 u8 instance;
6351 struct adv_info *adv_instance;
6352 u8 status = mgmt_status(err);
6353
6354 if (status) {
6355 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6356 cmd_status_rsp, &status);
6357 return;
6358 }
6359
6360 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6361 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6362 else
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6364
6365 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6366 &match);
6367
6368 new_settings(hdev, match.sk);
6369
6370 if (match.sk)
6371 sock_put(match.sk);
6372
6373 /* If "Set Advertising" was just disabled and instance advertising was
6374 * set up earlier, then re-enable multi-instance advertising.
6375 */
6376 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6377 list_empty(&hdev->adv_instances))
6378 return;
6379
6380 instance = hdev->cur_adv_instance;
6381 if (!instance) {
6382 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6383 struct adv_info, list);
6384 if (!adv_instance)
6385 return;
6386
6387 instance = adv_instance->instance;
6388 }
6389
6390 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6391
6392 enable_advertising_instance(hdev, err);
6393}
6394
6395static int set_adv_sync(struct hci_dev *hdev, void *data)
6396{
6397 struct mgmt_pending_cmd *cmd = data;
6398 struct mgmt_mode *cp = cmd->param;
6399 u8 val = !!cp->val;
6400
6401 if (cp->val == 0x02)
6402 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6403 else
6404 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6405
6406 cancel_adv_timeout(hdev);
6407
6408 if (val) {
6409 /* Switch to instance "0" for the Set Advertising setting.
6410 * We cannot use update_[adv|scan_rsp]_data() here as the
6411 * HCI_ADVERTISING flag is not yet set.
6412 */
6413 hdev->cur_adv_instance = 0x00;
6414
6415 if (ext_adv_capable(hdev)) {
6416 hci_start_ext_adv_sync(hdev, 0x00);
6417 } else {
6418 hci_update_adv_data_sync(hdev, 0x00);
6419 hci_update_scan_rsp_data_sync(hdev, 0x00);
6420 hci_enable_advertising_sync(hdev);
6421 }
6422 } else {
6423 hci_disable_advertising_sync(hdev);
6424 }
6425
6426 return 0;
6427}
6428
6429static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6430 u16 len)
6431{
6432 struct mgmt_mode *cp = data;
6433 struct mgmt_pending_cmd *cmd;
6434 u8 val, status;
6435 int err;
6436
6437 bt_dev_dbg(hdev, "sock %p", sk);
6438
6439 status = mgmt_le_support(hdev);
6440 if (status)
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6442 status);
6443
6444 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 MGMT_STATUS_INVALID_PARAMS);
6447
6448 if (hdev->advertising_paused)
6449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6450 MGMT_STATUS_BUSY);
6451
6452 hci_dev_lock(hdev);
6453
6454 val = !!cp->val;
6455
6456	/* The following conditions mean that we should not do any
6457	 * HCI communication but instead send a mgmt response
6458	 * directly to user space (after toggling the flag if
6459	 * necessary).
6460	 */
6461 if (!hdev_is_powered(hdev) ||
6462 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6463 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6464 hci_dev_test_flag(hdev, HCI_MESH) ||
6465 hci_conn_num(hdev, LE_LINK) > 0 ||
6466 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6467 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6468 bool changed;
6469
6470 if (cp->val) {
6471 hdev->cur_adv_instance = 0x00;
6472 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6473 if (cp->val == 0x02)
6474 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6475 else
6476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6477 } else {
6478 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6479 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 }
6481
6482 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6483 if (err < 0)
6484 goto unlock;
6485
6486 if (changed)
6487 err = new_settings(hdev, sk);
6488
6489 goto unlock;
6490 }
6491
6492 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6493 pending_find(MGMT_OP_SET_LE, hdev)) {
6494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6495 MGMT_STATUS_BUSY);
6496 goto unlock;
6497 }
6498
6499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6500 if (!cmd)
6501 err = -ENOMEM;
6502 else
6503 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6504 set_advertising_complete);
6505
6506 if (err < 0 && cmd)
6507 mgmt_pending_remove(cmd);
6508
6509unlock:
6510 hci_dev_unlock(hdev);
6511 return err;
6512}
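
/*
 * Illustrative note on the mode values handled above: 0x00 disables
 * advertising, 0x01 enables it, and 0x02 enables it as connectable
 * regardless of the global connectable setting, which is what the
 * HCI_ADVERTISING_CONNECTABLE flag tracks. E.g.:
 *
 *	struct mgmt_mode cp = { .val = 0x02 };	// connectable advertising
 */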
6513
6514static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6515 void *data, u16 len)
6516{
6517 struct mgmt_cp_set_static_address *cp = data;
6518 int err;
6519
6520 bt_dev_dbg(hdev, "sock %p", sk);
6521
6522 if (!lmp_le_capable(hdev))
6523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6524 MGMT_STATUS_NOT_SUPPORTED);
6525
6526 if (hdev_is_powered(hdev))
6527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528 MGMT_STATUS_REJECTED);
6529
6530 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6531 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6532 return mgmt_cmd_status(sk, hdev->id,
6533 MGMT_OP_SET_STATIC_ADDRESS,
6534 MGMT_STATUS_INVALID_PARAMS);
6535
6536 /* Two most significant bits shall be set */
6537 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6538 return mgmt_cmd_status(sk, hdev->id,
6539 MGMT_OP_SET_STATIC_ADDRESS,
6540 MGMT_STATUS_INVALID_PARAMS);
6541 }
6542
6543 hci_dev_lock(hdev);
6544
6545 bacpy(&hdev->static_addr, &cp->bdaddr);
6546
6547 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6548 if (err < 0)
6549 goto unlock;
6550
6551 err = new_settings(hdev, sk);
6552
6553unlock:
6554 hci_dev_unlock(hdev);
6555 return err;
6556}
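
/*
 * Illustrative example of the address check above (a sketch, not from
 * this file): bdaddr_t stores the address with b[5] as the most
 * significant byte, and a valid LE static random address must have
 * its two top bits set:
 *
 *	bdaddr_t ok  = {{ 0x11, 0x22, 0x33, 0x44, 0x55, 0xc0 }};	// accepted
 *	bdaddr_t bad = {{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x40 }};	// rejected
 *
 * (0xc0 & 0xc0) == 0xc0 passes, while 0x40 fails with
 * MGMT_STATUS_INVALID_PARAMS.
 */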
6557
6558static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6559 void *data, u16 len)
6560{
6561 struct mgmt_cp_set_scan_params *cp = data;
6562 __u16 interval, window;
6563 int err;
6564
6565 bt_dev_dbg(hdev, "sock %p", sk);
6566
6567 if (!lmp_le_capable(hdev))
6568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6569 MGMT_STATUS_NOT_SUPPORTED);
6570
6571 interval = __le16_to_cpu(cp->interval);
6572
6573 if (interval < 0x0004 || interval > 0x4000)
6574 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6575 MGMT_STATUS_INVALID_PARAMS);
6576
6577 window = __le16_to_cpu(cp->window);
6578
6579 if (window < 0x0004 || window > 0x4000)
6580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6581 MGMT_STATUS_INVALID_PARAMS);
6582
6583 if (window > interval)
6584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6585 MGMT_STATUS_INVALID_PARAMS);
6586
6587 hci_dev_lock(hdev);
6588
6589 hdev->le_scan_interval = interval;
6590 hdev->le_scan_window = window;
6591
6592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6593 NULL, 0);
6594
6595 /* If background scan is running, restart it so new parameters are
6596 * loaded.
6597 */
6598 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6599 hdev->discovery.state == DISCOVERY_STOPPED)
6600 hci_update_passive_scan(hdev);
6601
6602 hci_dev_unlock(hdev);
6603
6604 return err;
6605}
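
/*
 * Illustrative note (units per the core specification, an assumption
 * not restated here): interval and window are in 0.625 ms units, so
 * the accepted 0x0004-0x4000 range spans 2.5 ms to 10.24 s. A 60 ms
 * interval with a 30 ms window would be:
 *
 *	struct mgmt_cp_set_scan_params cp = {
 *		.interval = cpu_to_le16(0x0060),	// 96 * 0.625 ms = 60 ms
 *		.window   = cpu_to_le16(0x0030),	// 48 * 0.625 ms = 30 ms
 *	};
 *
 * The window must not exceed the interval, as checked above.
 */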
6606
6607static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6608{
6609 struct mgmt_pending_cmd *cmd = data;
6610
6611 bt_dev_dbg(hdev, "err %d", err);
6612
6613 if (err) {
6614 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6615 mgmt_status(err));
6616 } else {
6617 struct mgmt_mode *cp = cmd->param;
6618
6619 if (cp->val)
6620 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6621 else
6622 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6623
6624 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6625 new_settings(hdev, cmd->sk);
6626 }
6627
6628 mgmt_pending_free(cmd);
6629}
6630
6631static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6632{
6633 struct mgmt_pending_cmd *cmd = data;
6634 struct mgmt_mode *cp = cmd->param;
6635
6636 return hci_write_fast_connectable_sync(hdev, cp->val);
6637}
6638
6639static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6640 void *data, u16 len)
6641{
6642 struct mgmt_mode *cp = data;
6643 struct mgmt_pending_cmd *cmd;
6644 int err;
6645
6646 bt_dev_dbg(hdev, "sock %p", sk);
6647
6648 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6649 hdev->hci_ver < BLUETOOTH_VER_1_2)
6650 return mgmt_cmd_status(sk, hdev->id,
6651 MGMT_OP_SET_FAST_CONNECTABLE,
6652 MGMT_STATUS_NOT_SUPPORTED);
6653
6654 if (cp->val != 0x00 && cp->val != 0x01)
6655 return mgmt_cmd_status(sk, hdev->id,
6656 MGMT_OP_SET_FAST_CONNECTABLE,
6657 MGMT_STATUS_INVALID_PARAMS);
6658
6659 hci_dev_lock(hdev);
6660
6661 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6662 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6663 goto unlock;
6664 }
6665
6666 if (!hdev_is_powered(hdev)) {
6667 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6668 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6669 new_settings(hdev, sk);
6670 goto unlock;
6671 }
6672
6673 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6674 len);
6675 if (!cmd)
6676 err = -ENOMEM;
6677 else
6678 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6679 fast_connectable_complete);
6680
6681 if (err < 0) {
6682 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6683 MGMT_STATUS_FAILED);
6684
6685 if (cmd)
6686 mgmt_pending_free(cmd);
6687 }
6688
6689unlock:
6690 hci_dev_unlock(hdev);
6691
6692 return err;
6693}
6694
6695static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6696{
6697 struct mgmt_pending_cmd *cmd = data;
6698
6699 bt_dev_dbg(hdev, "err %d", err);
6700
6701 if (err) {
6702 u8 mgmt_err = mgmt_status(err);
6703
6704 /* We need to restore the flag if related HCI commands
6705 * failed.
6706 */
6707 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6708
6709 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6710 } else {
6711 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6712 new_settings(hdev, cmd->sk);
6713 }
6714
6715 mgmt_pending_free(cmd);
6716}
6717
6718static int set_bredr_sync(struct hci_dev *hdev, void *data)
6719{
6720 int status;
6721
6722 status = hci_write_fast_connectable_sync(hdev, false);
6723
6724 if (!status)
6725 status = hci_update_scan_sync(hdev);
6726
6727 /* Since only the advertising data flags will change, there
6728 * is no need to update the scan response data.
6729 */
6730 if (!status)
6731 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6732
6733 return status;
6734}
6735
6736static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6737{
6738 struct mgmt_mode *cp = data;
6739 struct mgmt_pending_cmd *cmd;
6740 int err;
6741
6742 bt_dev_dbg(hdev, "sock %p", sk);
6743
6744 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6746 MGMT_STATUS_NOT_SUPPORTED);
6747
6748 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 MGMT_STATUS_REJECTED);
6751
6752 if (cp->val != 0x00 && cp->val != 0x01)
6753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754 MGMT_STATUS_INVALID_PARAMS);
6755
6756 hci_dev_lock(hdev);
6757
6758 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6759 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6760 goto unlock;
6761 }
6762
6763 if (!hdev_is_powered(hdev)) {
6764 if (!cp->val) {
6765 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6766 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6767 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6768 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6769 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6770 }
6771
6772 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6773
6774 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6775 if (err < 0)
6776 goto unlock;
6777
6778 err = new_settings(hdev, sk);
6779 goto unlock;
6780 }
6781
6782 /* Reject disabling when powered on */
6783 if (!cp->val) {
6784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6785 MGMT_STATUS_REJECTED);
6786 goto unlock;
6787 } else {
6788 /* When configuring a dual-mode controller to operate
6789 * with LE only and using a static address, then switching
6790 * BR/EDR back on is not allowed.
6791 *
6792		 * Dual-mode controllers shall operate with the public
6793		 * address as their identity address for BR/EDR and LE. So
6794		 * reject the attempt to create an invalid configuration.
6795		 *
6796		 * The same restriction applies when secure connections
6797		 * have been enabled. For BR/EDR this is a controller feature
6798		 * while for LE it is a host stack feature. This means that
6799		 * switching BR/EDR back on when secure connections have been
6800		 * enabled is not a supported transaction.
6801 */
6802 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6803 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6804 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6806 MGMT_STATUS_REJECTED);
6807 goto unlock;
6808 }
6809 }
6810
6811 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6812 if (!cmd)
6813 err = -ENOMEM;
6814 else
6815 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6816 set_bredr_complete);
6817
6818 if (err < 0) {
6819 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6820 MGMT_STATUS_FAILED);
6821 if (cmd)
6822 mgmt_pending_free(cmd);
6823
6824 goto unlock;
6825 }
6826
6827 /* We need to flip the bit already here so that
6828 * hci_req_update_adv_data generates the correct flags.
6829 */
6830 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6831
6832unlock:
6833 hci_dev_unlock(hdev);
6834 return err;
6835}
6836
6837static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6838{
6839 struct mgmt_pending_cmd *cmd = data;
6840 struct mgmt_mode *cp;
6841
6842 bt_dev_dbg(hdev, "err %d", err);
6843
6844 if (err) {
6845 u8 mgmt_err = mgmt_status(err);
6846
6847 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6848 goto done;
6849 }
6850
6851 cp = cmd->param;
6852
6853 switch (cp->val) {
6854 case 0x00:
6855 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6856 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6857 break;
6858 case 0x01:
6859 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6860 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 break;
6862 case 0x02:
6863 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6865 break;
6866 }
6867
6868 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6869 new_settings(hdev, cmd->sk);
6870
6871done:
6872 mgmt_pending_free(cmd);
6873}
6874
6875static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6876{
6877 struct mgmt_pending_cmd *cmd = data;
6878 struct mgmt_mode *cp = cmd->param;
6879 u8 val = !!cp->val;
6880
6881 /* Force write of val */
6882 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6883
6884 return hci_write_sc_support_sync(hdev, val);
6885}
6886
6887static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6888 void *data, u16 len)
6889{
6890 struct mgmt_mode *cp = data;
6891 struct mgmt_pending_cmd *cmd;
6892 u8 val;
6893 int err;
6894
6895 bt_dev_dbg(hdev, "sock %p", sk);
6896
6897 if (!lmp_sc_capable(hdev) &&
6898 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6900 MGMT_STATUS_NOT_SUPPORTED);
6901
6902 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6903 lmp_sc_capable(hdev) &&
6904 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6906 MGMT_STATUS_REJECTED);
6907
6908 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910 MGMT_STATUS_INVALID_PARAMS);
6911
6912 hci_dev_lock(hdev);
6913
6914 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6916 bool changed;
6917
6918 if (cp->val) {
6919 changed = !hci_dev_test_and_set_flag(hdev,
6920 HCI_SC_ENABLED);
6921 if (cp->val == 0x02)
6922 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6923 else
6924 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6925 } else {
6926 changed = hci_dev_test_and_clear_flag(hdev,
6927 HCI_SC_ENABLED);
6928 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6929 }
6930
6931 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6932 if (err < 0)
6933 goto failed;
6934
6935 if (changed)
6936 err = new_settings(hdev, sk);
6937
6938 goto failed;
6939 }
6940
6941 val = !!cp->val;
6942
6943 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6944 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6945 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6946 goto failed;
6947 }
6948
6949 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6950 if (!cmd)
6951 err = -ENOMEM;
6952 else
6953 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6954 set_secure_conn_complete);
6955
6956 if (err < 0) {
6957 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6958 MGMT_STATUS_FAILED);
6959 if (cmd)
6960 mgmt_pending_free(cmd);
6961 }
6962
6963failed:
6964 hci_dev_unlock(hdev);
6965 return err;
6966}
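
/*
 * Illustrative note: Set Secure Connections takes 0x00 (disable),
 * 0x01 (enable) or 0x02 (enable "SC only" mode, in which pairing
 * that cannot use Secure Connections is refused). The completion
 * handler above maps these onto the HCI_SC_ENABLED and HCI_SC_ONLY
 * flags.
 */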
6967
6968static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6970{
6971 struct mgmt_mode *cp = data;
6972 bool changed, use_changed;
6973 int err;
6974
6975 bt_dev_dbg(hdev, "sock %p", sk);
6976
6977 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6979 MGMT_STATUS_INVALID_PARAMS);
6980
6981 hci_dev_lock(hdev);
6982
6983 if (cp->val)
6984 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6985 else
6986 changed = hci_dev_test_and_clear_flag(hdev,
6987 HCI_KEEP_DEBUG_KEYS);
6988
6989 if (cp->val == 0x02)
6990 use_changed = !hci_dev_test_and_set_flag(hdev,
6991 HCI_USE_DEBUG_KEYS);
6992 else
6993 use_changed = hci_dev_test_and_clear_flag(hdev,
6994 HCI_USE_DEBUG_KEYS);
6995
6996 if (hdev_is_powered(hdev) && use_changed &&
6997 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6998 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6999 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7000 sizeof(mode), &mode);
7001 }
7002
7003 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7004 if (err < 0)
7005 goto unlock;
7006
7007 if (changed)
7008 err = new_settings(hdev, sk);
7009
7010unlock:
7011 hci_dev_unlock(hdev);
7012 return err;
7013}
7014
7015static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7016 u16 len)
7017{
7018 struct mgmt_cp_set_privacy *cp = cp_data;
7019 bool changed;
7020 int err;
7021
7022 bt_dev_dbg(hdev, "sock %p", sk);
7023
7024 if (!lmp_le_capable(hdev))
7025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7026 MGMT_STATUS_NOT_SUPPORTED);
7027
7028 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 MGMT_STATUS_INVALID_PARAMS);
7031
7032 if (hdev_is_powered(hdev))
7033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7034 MGMT_STATUS_REJECTED);
7035
7036 hci_dev_lock(hdev);
7037
7038 /* If user space supports this command it is also expected to
7039 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7040 */
7041 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7042
7043 if (cp->privacy) {
7044 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7045 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7046 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7047 hci_adv_instances_set_rpa_expired(hdev, true);
7048 if (cp->privacy == 0x02)
7049 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7050 else
7051 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7052 } else {
7053 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7054 memset(hdev->irk, 0, sizeof(hdev->irk));
7055 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7056 hci_adv_instances_set_rpa_expired(hdev, false);
7057 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7058 }
7059
7060 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7061 if (err < 0)
7062 goto unlock;
7063
7064 if (changed)
7065 err = new_settings(hdev, sk);
7066
7067unlock:
7068 hci_dev_unlock(hdev);
7069 return err;
7070}
7071
7072static bool irk_is_valid(struct mgmt_irk_info *irk)
7073{
7074 switch (irk->addr.type) {
7075 case BDADDR_LE_PUBLIC:
7076 return true;
7077
7078 case BDADDR_LE_RANDOM:
7079 /* Two most significant bits shall be set */
7080 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7081 return false;
7082 return true;
7083 }
7084
7085 return false;
7086}
7087
7088static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7089 u16 len)
7090{
7091 struct mgmt_cp_load_irks *cp = cp_data;
7092 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7093 sizeof(struct mgmt_irk_info));
7094 u16 irk_count, expected_len;
7095 int i, err;
7096
7097 bt_dev_dbg(hdev, "sock %p", sk);
7098
7099 if (!lmp_le_capable(hdev))
7100 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7101 MGMT_STATUS_NOT_SUPPORTED);
7102
7103 irk_count = __le16_to_cpu(cp->irk_count);
7104 if (irk_count > max_irk_count) {
7105 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7106 irk_count);
7107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7108 MGMT_STATUS_INVALID_PARAMS);
7109 }
7110
7111 expected_len = struct_size(cp, irks, irk_count);
7112 if (expected_len != len) {
7113 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7114 expected_len, len);
7115 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7116 MGMT_STATUS_INVALID_PARAMS);
7117 }
7118
7119 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7120
7121 for (i = 0; i < irk_count; i++) {
7122 struct mgmt_irk_info *key = &cp->irks[i];
7123
7124 if (!irk_is_valid(key))
7125 return mgmt_cmd_status(sk, hdev->id,
7126 MGMT_OP_LOAD_IRKS,
7127 MGMT_STATUS_INVALID_PARAMS);
7128 }
7129
7130 hci_dev_lock(hdev);
7131
7132 hci_smp_irks_clear(hdev);
7133
7134 for (i = 0; i < irk_count; i++) {
7135 struct mgmt_irk_info *irk = &cp->irks[i];
7136 u8 addr_type = le_addr_type(irk->addr.type);
7137
7138 if (hci_is_blocked_key(hdev,
7139 HCI_BLOCKED_KEY_TYPE_IRK,
7140 irk->val)) {
7141 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7142 &irk->addr.bdaddr);
7143 continue;
7144 }
7145
7146 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7147 if (irk->addr.type == BDADDR_BREDR)
7148 addr_type = BDADDR_BREDR;
7149
7150 hci_add_irk(hdev, &irk->addr.bdaddr,
7151 addr_type, irk->val,
7152 BDADDR_ANY);
7153 }
7154
7155 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7156
7157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7158
7159 hci_dev_unlock(hdev);
7160
7161 return err;
7162}
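
/*
 * Illustrative note on the sizing above (an assumption about the
 * caller): Load IRKs ends in a flexible array, so a well-formed
 * request obeys
 *
 *	len = struct_size(cp, irks, irk_count)
 *	    = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
 *
 * where each mgmt_irk_info carries an identity address plus the
 * 16-byte IRK used to resolve resolvable private addresses.
 */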
7163
7164static bool ltk_is_valid(struct mgmt_ltk_info *key)
7165{
7166 if (key->initiator != 0x00 && key->initiator != 0x01)
7167 return false;
7168
7169 switch (key->addr.type) {
7170 case BDADDR_LE_PUBLIC:
7171 return true;
7172
7173 case BDADDR_LE_RANDOM:
7174 /* Two most significant bits shall be set */
7175 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7176 return false;
7177 return true;
7178 }
7179
7180 return false;
7181}
7182
7183static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7184 void *cp_data, u16 len)
7185{
7186 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7187 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7188 sizeof(struct mgmt_ltk_info));
7189 u16 key_count, expected_len;
7190 int i, err;
7191
7192 bt_dev_dbg(hdev, "sock %p", sk);
7193
7194 if (!lmp_le_capable(hdev))
7195 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7196 MGMT_STATUS_NOT_SUPPORTED);
7197
7198 key_count = __le16_to_cpu(cp->key_count);
7199 if (key_count > max_key_count) {
7200 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7201 key_count);
7202 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7203 MGMT_STATUS_INVALID_PARAMS);
7204 }
7205
7206 expected_len = struct_size(cp, keys, key_count);
7207 if (expected_len != len) {
7208		bt_dev_err(hdev, "load_ltks: expected %u bytes, got %u bytes",
7209 expected_len, len);
7210 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7211 MGMT_STATUS_INVALID_PARAMS);
7212 }
7213
7214 bt_dev_dbg(hdev, "key_count %u", key_count);
7215
7216 for (i = 0; i < key_count; i++) {
7217 struct mgmt_ltk_info *key = &cp->keys[i];
7218
7219 if (!ltk_is_valid(key))
7220 return mgmt_cmd_status(sk, hdev->id,
7221 MGMT_OP_LOAD_LONG_TERM_KEYS,
7222 MGMT_STATUS_INVALID_PARAMS);
7223 }
7224
7225 hci_dev_lock(hdev);
7226
7227 hci_smp_ltks_clear(hdev);
7228
7229 for (i = 0; i < key_count; i++) {
7230 struct mgmt_ltk_info *key = &cp->keys[i];
7231 u8 type, authenticated;
7232 u8 addr_type = le_addr_type(key->addr.type);
7233
7234 if (hci_is_blocked_key(hdev,
7235 HCI_BLOCKED_KEY_TYPE_LTK,
7236 key->val)) {
7237 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7238 &key->addr.bdaddr);
7239 continue;
7240 }
7241
7242 switch (key->type) {
7243 case MGMT_LTK_UNAUTHENTICATED:
7244 authenticated = 0x00;
7245 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7246 break;
7247 case MGMT_LTK_AUTHENTICATED:
7248 authenticated = 0x01;
7249 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7250 break;
7251 case MGMT_LTK_P256_UNAUTH:
7252 authenticated = 0x00;
7253 type = SMP_LTK_P256;
7254 break;
7255 case MGMT_LTK_P256_AUTH:
7256 authenticated = 0x01;
7257 type = SMP_LTK_P256;
7258 break;
7259 case MGMT_LTK_P256_DEBUG:
7260 authenticated = 0x00;
7261 type = SMP_LTK_P256_DEBUG;
7262 fallthrough;
7263 default:
7264 continue;
7265 }
7266
7267 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7268 if (key->addr.type == BDADDR_BREDR)
7269 addr_type = BDADDR_BREDR;
7270
7271 hci_add_ltk(hdev, &key->addr.bdaddr,
7272 addr_type, type, authenticated,
7273 key->val, key->enc_size, key->ediv, key->rand);
7274 }
7275
7276 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7277 NULL, 0);
7278
7279 hci_dev_unlock(hdev);
7280
7281 return err;
7282}
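
/*
 * Illustrative summary of the key-type mapping in the loop above
 * (restating the switch, not new behaviour):
 *
 *	MGMT_LTK_UNAUTHENTICATED -> SMP_LTK / SMP_LTK_RESPONDER, unauth
 *	MGMT_LTK_AUTHENTICATED   -> SMP_LTK / SMP_LTK_RESPONDER, auth
 *	MGMT_LTK_P256_UNAUTH     -> SMP_LTK_P256, unauthenticated
 *	MGMT_LTK_P256_AUTH       -> SMP_LTK_P256, authenticated
 *	MGMT_LTK_P256_DEBUG      -> dropped (falls through to default)
 *
 * Debug keys are deliberately never loaded: the fallthrough into the
 * default case skips them.
 */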
7283
7284static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7285{
7286 struct mgmt_pending_cmd *cmd = data;
7287 struct hci_conn *conn = cmd->user_data;
7288 struct mgmt_cp_get_conn_info *cp = cmd->param;
7289 struct mgmt_rp_get_conn_info rp;
7290 u8 status;
7291
7292 bt_dev_dbg(hdev, "err %d", err);
7293
7294 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7295
7296 status = mgmt_status(err);
7297 if (status == MGMT_STATUS_SUCCESS) {
7298 rp.rssi = conn->rssi;
7299 rp.tx_power = conn->tx_power;
7300 rp.max_tx_power = conn->max_tx_power;
7301 } else {
7302 rp.rssi = HCI_RSSI_INVALID;
7303 rp.tx_power = HCI_TX_POWER_INVALID;
7304 rp.max_tx_power = HCI_TX_POWER_INVALID;
7305 }
7306
7307 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7308 &rp, sizeof(rp));
7309
7310 mgmt_pending_free(cmd);
7311}
7312
7313static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7314{
7315 struct mgmt_pending_cmd *cmd = data;
7316 struct mgmt_cp_get_conn_info *cp = cmd->param;
7317 struct hci_conn *conn;
7318 int err;
7319 __le16 handle;
7320
7321 /* Make sure we are still connected */
7322 if (cp->addr.type == BDADDR_BREDR)
7323 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7324 &cp->addr.bdaddr);
7325 else
7326 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7327
7328 if (!conn || conn->state != BT_CONNECTED)
7329 return MGMT_STATUS_NOT_CONNECTED;
7330
7331 cmd->user_data = conn;
7332 handle = cpu_to_le16(conn->handle);
7333
7334 /* Refresh RSSI each time */
7335 err = hci_read_rssi_sync(hdev, handle);
7336
7337	/* For LE links the TX power does not change, so we don't need
7338	 * to query for it once the value is known.
7339	 */
7340 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7341 conn->tx_power == HCI_TX_POWER_INVALID))
7342 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7343
7344 /* Max TX power needs to be read only once per connection */
7345 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7346 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7347
7348 return err;
7349}
7350
7351static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7352 u16 len)
7353{
7354 struct mgmt_cp_get_conn_info *cp = data;
7355 struct mgmt_rp_get_conn_info rp;
7356 struct hci_conn *conn;
7357 unsigned long conn_info_age;
7358 int err = 0;
7359
7360 bt_dev_dbg(hdev, "sock %p", sk);
7361
7362 memset(&rp, 0, sizeof(rp));
7363 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7364 rp.addr.type = cp->addr.type;
7365
7366 if (!bdaddr_type_is_valid(cp->addr.type))
7367 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7368 MGMT_STATUS_INVALID_PARAMS,
7369 &rp, sizeof(rp));
7370
7371 hci_dev_lock(hdev);
7372
7373 if (!hdev_is_powered(hdev)) {
7374 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7375 MGMT_STATUS_NOT_POWERED, &rp,
7376 sizeof(rp));
7377 goto unlock;
7378 }
7379
7380 if (cp->addr.type == BDADDR_BREDR)
7381 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7382 &cp->addr.bdaddr);
7383 else
7384 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7385
7386 if (!conn || conn->state != BT_CONNECTED) {
7387 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7388 MGMT_STATUS_NOT_CONNECTED, &rp,
7389 sizeof(rp));
7390 goto unlock;
7391 }
7392
7393	/* To keep the client from guessing when to poll again, calculate the
7394	 * conn info age as a random value between the min/max set in hdev.
7395	 */
7396 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7397 hdev->conn_info_max_age - 1);
7398
7399 /* Query controller to refresh cached values if they are too old or were
7400 * never read.
7401 */
7402 if (time_after(jiffies, conn->conn_info_timestamp +
7403 msecs_to_jiffies(conn_info_age)) ||
7404 !conn->conn_info_timestamp) {
7405 struct mgmt_pending_cmd *cmd;
7406
7407 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7408 len);
7409 if (!cmd) {
7410 err = -ENOMEM;
7411 } else {
7412 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7413 cmd, get_conn_info_complete);
7414 }
7415
7416 if (err < 0) {
7417 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7418 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7419
7420 if (cmd)
7421 mgmt_pending_free(cmd);
7422
7423 goto unlock;
7424 }
7425
7426 conn->conn_info_timestamp = jiffies;
7427 } else {
7428 /* Cache is valid, just reply with values cached in hci_conn */
7429 rp.rssi = conn->rssi;
7430 rp.tx_power = conn->tx_power;
7431 rp.max_tx_power = conn->max_tx_power;
7432
7433 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7434 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7435 }
7436
7437unlock:
7438 hci_dev_unlock(hdev);
7439 return err;
7440}
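
/*
 * Illustrative sketch of the cache-age policy above, using the usual
 * hdev defaults (an assumption; this file does not set them): with
 * conn_info_min_age = 1000 and conn_info_max_age = 3000,
 *
 *	conn_info_age = get_random_u32_inclusive(1000, 2999);
 *
 * gives each cached result a 1-3 s lifetime, so user space cannot
 * lock onto a fixed refresh cadence.
 */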
7441
7442static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7443{
7444 struct mgmt_pending_cmd *cmd = data;
7445 struct mgmt_cp_get_clock_info *cp = cmd->param;
7446 struct mgmt_rp_get_clock_info rp;
7447 struct hci_conn *conn = cmd->user_data;
7448 u8 status = mgmt_status(err);
7449
7450 bt_dev_dbg(hdev, "err %d", err);
7451
7452 memset(&rp, 0, sizeof(rp));
7453 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7454 rp.addr.type = cp->addr.type;
7455
7456 if (err)
7457 goto complete;
7458
7459 rp.local_clock = cpu_to_le32(hdev->clock);
7460
7461 if (conn) {
7462 rp.piconet_clock = cpu_to_le32(conn->clock);
7463 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7464 }
7465
7466complete:
7467 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7468 sizeof(rp));
7469
7470 mgmt_pending_free(cmd);
7471}
7472
7473static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7474{
7475 struct mgmt_pending_cmd *cmd = data;
7476 struct mgmt_cp_get_clock_info *cp = cmd->param;
7477 struct hci_cp_read_clock hci_cp;
7478 struct hci_conn *conn;
7479
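	/* First read the local clock: with hci_cp zeroed, "which" is
	 * 0x00 (local clock) and the handle is 0x0000.
	 */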
7480 memset(&hci_cp, 0, sizeof(hci_cp));
7481 hci_read_clock_sync(hdev, &hci_cp);
7482
7483 /* Make sure connection still exists */
7484 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7485 if (!conn || conn->state != BT_CONNECTED)
7486 return MGMT_STATUS_NOT_CONNECTED;
7487
7488 cmd->user_data = conn;
7489 hci_cp.handle = cpu_to_le16(conn->handle);
7490 hci_cp.which = 0x01; /* Piconet clock */
7491
7492 return hci_read_clock_sync(hdev, &hci_cp);
7493}
7494
7495static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7496 u16 len)
7497{
7498 struct mgmt_cp_get_clock_info *cp = data;
7499 struct mgmt_rp_get_clock_info rp;
7500 struct mgmt_pending_cmd *cmd;
7501 struct hci_conn *conn;
7502 int err;
7503
7504 bt_dev_dbg(hdev, "sock %p", sk);
7505
7506 memset(&rp, 0, sizeof(rp));
7507 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7508 rp.addr.type = cp->addr.type;
7509
7510 if (cp->addr.type != BDADDR_BREDR)
7511 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7512 MGMT_STATUS_INVALID_PARAMS,
7513 &rp, sizeof(rp));
7514
7515 hci_dev_lock(hdev);
7516
7517 if (!hdev_is_powered(hdev)) {
7518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7519 MGMT_STATUS_NOT_POWERED, &rp,
7520 sizeof(rp));
7521 goto unlock;
7522 }
7523
7524 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7525 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7526 &cp->addr.bdaddr);
7527 if (!conn || conn->state != BT_CONNECTED) {
7528 err = mgmt_cmd_complete(sk, hdev->id,
7529 MGMT_OP_GET_CLOCK_INFO,
7530 MGMT_STATUS_NOT_CONNECTED,
7531 &rp, sizeof(rp));
7532 goto unlock;
7533 }
7534 } else {
7535 conn = NULL;
7536 }
7537
7538 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7539 if (!cmd)
7540 err = -ENOMEM;
7541 else
7542 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7543 get_clock_info_complete);
7544
7545 if (err < 0) {
7546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7547 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7548
7549 if (cmd)
7550 mgmt_pending_free(cmd);
7551 }
7552
7554unlock:
7555 hci_dev_unlock(hdev);
7556 return err;
7557}
7558
7559static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7560{
7561 struct hci_conn *conn;
7562
7563 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7564 if (!conn)
7565 return false;
7566
7567 if (conn->dst_type != type)
7568 return false;
7569
7570 if (conn->state != BT_CONNECTED)
7571 return false;
7572
7573 return true;
7574}
7575
7576/* This function requires the caller holds hdev->lock */
7577static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7578 u8 addr_type, u8 auto_connect)
7579{
7580 struct hci_conn_params *params;
7581
7582 params = hci_conn_params_add(hdev, addr, addr_type);
7583 if (!params)
7584 return -EIO;
7585
7586 if (params->auto_connect == auto_connect)
7587 return 0;
7588
7589 hci_pend_le_list_del_init(params);
7590
7591 switch (auto_connect) {
7592 case HCI_AUTO_CONN_DISABLED:
7593 case HCI_AUTO_CONN_LINK_LOSS:
7594 /* If auto connect is being disabled when we're trying to
7595 * connect to device, keep connecting.
7596 */
7597 if (params->explicit_connect)
7598 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7599 break;
7600 case HCI_AUTO_CONN_REPORT:
7601 if (params->explicit_connect)
7602 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7603 else
7604 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7605 break;
7606 case HCI_AUTO_CONN_DIRECT:
7607 case HCI_AUTO_CONN_ALWAYS:
7608 if (!is_connected(hdev, addr, addr_type))
7609 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7610 break;
7611 }
7612
7613 params->auto_connect = auto_connect;
7614
7615 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7616 addr, addr_type, auto_connect);
7617
7618 return 0;
7619}
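
/*
 * Illustrative summary of the auto_connect policies handled above
 * (restating the switch, not new behaviour):
 *
 *	HCI_AUTO_CONN_DISABLED/LINK_LOSS: off the background lists,
 *		unless an explicit connect is already in flight
 *	HCI_AUTO_CONN_REPORT: pend_le_reports, i.e. only report
 *		advertising events (or pend_le_conns while an explicit
 *		connect is pending)
 *	HCI_AUTO_CONN_DIRECT/ALWAYS: pend_le_conns whenever the device
 *		is not already connected
 */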
7620
7621static void device_added(struct sock *sk, struct hci_dev *hdev,
7622 bdaddr_t *bdaddr, u8 type, u8 action)
7623{
7624 struct mgmt_ev_device_added ev;
7625
7626 bacpy(&ev.addr.bdaddr, bdaddr);
7627 ev.addr.type = type;
7628 ev.action = action;
7629
7630 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7631}
7632
7633static int add_device_sync(struct hci_dev *hdev, void *data)
7634{
7635 return hci_update_passive_scan_sync(hdev);
7636}
7637
7638static int add_device(struct sock *sk, struct hci_dev *hdev,
7639 void *data, u16 len)
7640{
7641 struct mgmt_cp_add_device *cp = data;
7642 u8 auto_conn, addr_type;
7643 struct hci_conn_params *params;
7644 int err;
7645 u32 current_flags = 0;
7646 u32 supported_flags;
7647
7648 bt_dev_dbg(hdev, "sock %p", sk);
7649
7650 if (!bdaddr_type_is_valid(cp->addr.type) ||
7651 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7652 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7653 MGMT_STATUS_INVALID_PARAMS,
7654 &cp->addr, sizeof(cp->addr));
7655
7656 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7657 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7658 MGMT_STATUS_INVALID_PARAMS,
7659 &cp->addr, sizeof(cp->addr));
7660
7661 hci_dev_lock(hdev);
7662
7663 if (cp->addr.type == BDADDR_BREDR) {
7664 /* Only incoming connections action is supported for now */
7665 if (cp->action != 0x01) {
7666 err = mgmt_cmd_complete(sk, hdev->id,
7667 MGMT_OP_ADD_DEVICE,
7668 MGMT_STATUS_INVALID_PARAMS,
7669 &cp->addr, sizeof(cp->addr));
7670 goto unlock;
7671 }
7672
7673 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7674 &cp->addr.bdaddr,
7675 cp->addr.type, 0);
7676 if (err)
7677 goto unlock;
7678
7679 hci_update_scan(hdev);
7680
7681 goto added;
7682 }
7683
7684 addr_type = le_addr_type(cp->addr.type);
7685
7686 if (cp->action == 0x02)
7687 auto_conn = HCI_AUTO_CONN_ALWAYS;
7688 else if (cp->action == 0x01)
7689 auto_conn = HCI_AUTO_CONN_DIRECT;
7690 else
7691 auto_conn = HCI_AUTO_CONN_REPORT;
7692
7693	/* The kernel internally uses conn_params with resolvable
7694	 * private addresses, but Add Device allows only identity
7695	 * addresses. Make sure this is enforced before calling
7696	 * hci_conn_params_lookup.
7697	 */
7698 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7699 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7700 MGMT_STATUS_INVALID_PARAMS,
7701 &cp->addr, sizeof(cp->addr));
7702 goto unlock;
7703 }
7704
7705 /* If the connection parameters don't exist for this device,
7706 * they will be created and configured with defaults.
7707 */
7708 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7709 auto_conn) < 0) {
7710 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7711 MGMT_STATUS_FAILED, &cp->addr,
7712 sizeof(cp->addr));
7713 goto unlock;
7714 } else {
7715 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7716 addr_type);
7717 if (params)
7718 current_flags = params->flags;
7719 }
7720
7721 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7722 if (err < 0)
7723 goto unlock;
7724
7725added:
7726 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7727 supported_flags = hdev->conn_flags;
7728 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7729 supported_flags, current_flags);
7730
7731 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7732 MGMT_STATUS_SUCCESS, &cp->addr,
7733 sizeof(cp->addr));
7734
7735unlock:
7736 hci_dev_unlock(hdev);
7737 return err;
7738}
7739
7740static void device_removed(struct sock *sk, struct hci_dev *hdev,
7741 bdaddr_t *bdaddr, u8 type)
7742{
7743 struct mgmt_ev_device_removed ev;
7744
7745 bacpy(&ev.addr.bdaddr, bdaddr);
7746 ev.addr.type = type;
7747
7748 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7749}
7750
7751static int remove_device_sync(struct hci_dev *hdev, void *data)
7752{
7753 return hci_update_passive_scan_sync(hdev);
7754}
7755
7756static int remove_device(struct sock *sk, struct hci_dev *hdev,
7757 void *data, u16 len)
7758{
7759 struct mgmt_cp_remove_device *cp = data;
7760 int err;
7761
7762 bt_dev_dbg(hdev, "sock %p", sk);
7763
7764 hci_dev_lock(hdev);
7765
7766 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7767 struct hci_conn_params *params;
7768 u8 addr_type;
7769
7770 if (!bdaddr_type_is_valid(cp->addr.type)) {
7771 err = mgmt_cmd_complete(sk, hdev->id,
7772 MGMT_OP_REMOVE_DEVICE,
7773 MGMT_STATUS_INVALID_PARAMS,
7774 &cp->addr, sizeof(cp->addr));
7775 goto unlock;
7776 }
7777
7778 if (cp->addr.type == BDADDR_BREDR) {
7779 err = hci_bdaddr_list_del(&hdev->accept_list,
7780 &cp->addr.bdaddr,
7781 cp->addr.type);
7782 if (err) {
7783 err = mgmt_cmd_complete(sk, hdev->id,
7784 MGMT_OP_REMOVE_DEVICE,
7785 MGMT_STATUS_INVALID_PARAMS,
7786 &cp->addr,
7787 sizeof(cp->addr));
7788 goto unlock;
7789 }
7790
7791 hci_update_scan(hdev);
7792
7793 device_removed(sk, hdev, &cp->addr.bdaddr,
7794 cp->addr.type);
7795 goto complete;
7796 }
7797
7798 addr_type = le_addr_type(cp->addr.type);
7799
7800		/* The kernel internally uses conn_params with resolvable
7801		 * private addresses, but Remove Device allows only identity
7802		 * addresses. Make sure this is enforced before calling
7803		 * hci_conn_params_lookup.
7804		 */
7805 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7806 err = mgmt_cmd_complete(sk, hdev->id,
7807 MGMT_OP_REMOVE_DEVICE,
7808 MGMT_STATUS_INVALID_PARAMS,
7809 &cp->addr, sizeof(cp->addr));
7810 goto unlock;
7811 }
7812
7813 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7814 addr_type);
7815 if (!params) {
7816 err = mgmt_cmd_complete(sk, hdev->id,
7817 MGMT_OP_REMOVE_DEVICE,
7818 MGMT_STATUS_INVALID_PARAMS,
7819 &cp->addr, sizeof(cp->addr));
7820 goto unlock;
7821 }
7822
7823 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7824 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7825 err = mgmt_cmd_complete(sk, hdev->id,
7826 MGMT_OP_REMOVE_DEVICE,
7827 MGMT_STATUS_INVALID_PARAMS,
7828 &cp->addr, sizeof(cp->addr));
7829 goto unlock;
7830 }
7831
7832 hci_conn_params_free(params);
7833
7834 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7835 } else {
7836 struct hci_conn_params *p, *tmp;
7837 struct bdaddr_list *b, *btmp;
7838
7839 if (cp->addr.type) {
7840 err = mgmt_cmd_complete(sk, hdev->id,
7841 MGMT_OP_REMOVE_DEVICE,
7842 MGMT_STATUS_INVALID_PARAMS,
7843 &cp->addr, sizeof(cp->addr));
7844 goto unlock;
7845 }
7846
7847 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7848 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7849 list_del(&b->list);
7850 kfree(b);
7851 }
7852
7853 hci_update_scan(hdev);
7854
7855 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7856 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7857 continue;
7858 device_removed(sk, hdev, &p->addr, p->addr_type);
7859 if (p->explicit_connect) {
7860 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7861 continue;
7862 }
7863 hci_conn_params_free(p);
7864 }
7865
7866 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7867 }
7868
7869 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7870
7871complete:
7872 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7873 MGMT_STATUS_SUCCESS, &cp->addr,
7874 sizeof(cp->addr));
7875unlock:
7876 hci_dev_unlock(hdev);
7877 return err;
7878}
7879
7880static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7881 u16 len)
7882{
7883 struct mgmt_cp_load_conn_param *cp = data;
7884 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7885 sizeof(struct mgmt_conn_param));
7886 u16 param_count, expected_len;
7887 int i;
7888
7889 if (!lmp_le_capable(hdev))
7890 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7891 MGMT_STATUS_NOT_SUPPORTED);
7892
7893 param_count = __le16_to_cpu(cp->param_count);
7894 if (param_count > max_param_count) {
7895 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7896 param_count);
7897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7898 MGMT_STATUS_INVALID_PARAMS);
7899 }
7900
7901 expected_len = struct_size(cp, params, param_count);
7902 if (expected_len != len) {
7903 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7904 expected_len, len);
7905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7906 MGMT_STATUS_INVALID_PARAMS);
7907 }
7908
7909 bt_dev_dbg(hdev, "param_count %u", param_count);
7910
7911 hci_dev_lock(hdev);
7912
7913 hci_conn_params_clear_disabled(hdev);
7914
7915 for (i = 0; i < param_count; i++) {
7916 struct mgmt_conn_param *param = &cp->params[i];
7917 struct hci_conn_params *hci_param;
7918 u16 min, max, latency, timeout;
7919 u8 addr_type;
7920
7921		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7922 param->addr.type);
7923
7924 if (param->addr.type == BDADDR_LE_PUBLIC) {
7925 addr_type = ADDR_LE_DEV_PUBLIC;
7926 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7927 addr_type = ADDR_LE_DEV_RANDOM;
7928 } else {
7929 bt_dev_err(hdev, "ignoring invalid connection parameters");
7930 continue;
7931 }
7932
7933 min = le16_to_cpu(param->min_interval);
7934 max = le16_to_cpu(param->max_interval);
7935 latency = le16_to_cpu(param->latency);
7936 timeout = le16_to_cpu(param->timeout);
7937
7938 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7939 min, max, latency, timeout);
7940
7941 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7942 bt_dev_err(hdev, "ignoring invalid connection parameters");
7943 continue;
7944 }
7945
7946		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7947 addr_type);
7948 if (!hci_param) {
7949 bt_dev_err(hdev, "failed to add connection parameters");
7950 continue;
7951 }
7952
7953 hci_param->conn_min_interval = min;
7954 hci_param->conn_max_interval = max;
7955 hci_param->conn_latency = latency;
7956 hci_param->supervision_timeout = timeout;
7957 }
7958
7959 hci_dev_unlock(hdev);
7960
7961 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7962 NULL, 0);
7963}
7964
7965static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7966 void *data, u16 len)
7967{
7968 struct mgmt_cp_set_external_config *cp = data;
7969 bool changed;
7970 int err;
7971
7972 bt_dev_dbg(hdev, "sock %p", sk);
7973
7974 if (hdev_is_powered(hdev))
7975 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7976 MGMT_STATUS_REJECTED);
7977
7978 if (cp->config != 0x00 && cp->config != 0x01)
7979 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7980 MGMT_STATUS_INVALID_PARAMS);
7981
7982 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7984 MGMT_STATUS_NOT_SUPPORTED);
7985
7986 hci_dev_lock(hdev);
7987
7988 if (cp->config)
7989 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7990 else
7991 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7992
7993 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7994 if (err < 0)
7995 goto unlock;
7996
7997 if (!changed)
7998 goto unlock;
7999
8000 err = new_options(hdev, sk);
8001
8002 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8003 mgmt_index_removed(hdev);
8004
8005 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8006 hci_dev_set_flag(hdev, HCI_CONFIG);
8007 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8008
8009 queue_work(hdev->req_workqueue, &hdev->power_on);
8010 } else {
8011 set_bit(HCI_RAW, &hdev->flags);
8012 mgmt_index_added(hdev);
8013 }
8014 }
8015
8016unlock:
8017 hci_dev_unlock(hdev);
8018 return err;
8019}
8020
8021static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8022 void *data, u16 len)
8023{
8024 struct mgmt_cp_set_public_address *cp = data;
8025 bool changed;
8026 int err;
8027
8028 bt_dev_dbg(hdev, "sock %p", sk);
8029
8030 if (hdev_is_powered(hdev))
8031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8032 MGMT_STATUS_REJECTED);
8033
8034 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8035 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8036 MGMT_STATUS_INVALID_PARAMS);
8037
8038 if (!hdev->set_bdaddr)
8039 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8040 MGMT_STATUS_NOT_SUPPORTED);
8041
8042 hci_dev_lock(hdev);
8043
8044 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8045 bacpy(&hdev->public_addr, &cp->bdaddr);
8046
8047 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8048 if (err < 0)
8049 goto unlock;
8050
8051 if (!changed)
8052 goto unlock;
8053
8054 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8055 err = new_options(hdev, sk);
8056
8057 if (is_configured(hdev)) {
8058 mgmt_index_removed(hdev);
8059
8060 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8061
8062 hci_dev_set_flag(hdev, HCI_CONFIG);
8063 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8064
8065 queue_work(hdev->req_workqueue, &hdev->power_on);
8066 }
8067
8068unlock:
8069 hci_dev_unlock(hdev);
8070 return err;
8071}
8072
8073static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8074 int err)
8075{
8076 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8077 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8078 u8 *h192, *r192, *h256, *r256;
8079 struct mgmt_pending_cmd *cmd = data;
8080 struct sk_buff *skb = cmd->skb;
8081 u8 status = mgmt_status(err);
8082 u16 eir_len;
8083
8084 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8085 return;
8086
8087 if (!status) {
8088 if (!skb)
8089 status = MGMT_STATUS_FAILED;
8090 else if (IS_ERR(skb))
8091 status = mgmt_status(PTR_ERR(skb));
8092 else
8093 status = mgmt_status(skb->data[0]);
8094 }
8095
8096 bt_dev_dbg(hdev, "status %u", status);
8097
8098 mgmt_cp = cmd->param;
8099
8100 if (status) {
8101 status = mgmt_status(status);
8102 eir_len = 0;
8103
8104 h192 = NULL;
8105 r192 = NULL;
8106 h256 = NULL;
8107 r256 = NULL;
8108 } else if (!bredr_sc_enabled(hdev)) {
8109 struct hci_rp_read_local_oob_data *rp;
8110
8111 if (skb->len != sizeof(*rp)) {
8112 status = MGMT_STATUS_FAILED;
8113 eir_len = 0;
8114 } else {
8115 status = MGMT_STATUS_SUCCESS;
8116 rp = (void *)skb->data;
8117
8118 eir_len = 5 + 18 + 18;
8119 h192 = rp->hash;
8120 r192 = rp->rand;
8121 h256 = NULL;
8122 r256 = NULL;
8123 }
8124 } else {
8125 struct hci_rp_read_local_oob_ext_data *rp;
8126
8127 if (skb->len != sizeof(*rp)) {
8128 status = MGMT_STATUS_FAILED;
8129 eir_len = 0;
8130 } else {
8131 status = MGMT_STATUS_SUCCESS;
8132 rp = (void *)skb->data;
8133
8134 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8135 eir_len = 5 + 18 + 18;
8136 h192 = NULL;
8137 r192 = NULL;
8138 } else {
8139 eir_len = 5 + 18 + 18 + 18 + 18;
8140 h192 = rp->hash192;
8141 r192 = rp->rand192;
8142 }
8143
8144 h256 = rp->hash256;
8145 r256 = rp->rand256;
8146 }
8147 }
8148
8149 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8150 if (!mgmt_rp)
8151 goto done;
8152
8153 if (eir_len == 0)
8154 goto send_rsp;
8155
8156 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8157 hdev->dev_class, 3);
8158
8159 if (h192 && r192) {
8160 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8161 EIR_SSP_HASH_C192, h192, 16);
8162 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8163 EIR_SSP_RAND_R192, r192, 16);
8164 }
8165
8166 if (h256 && r256) {
8167 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8168 EIR_SSP_HASH_C256, h256, 16);
8169 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8170 EIR_SSP_RAND_R256, r256, 16);
8171 }
8172
8173send_rsp:
8174 mgmt_rp->type = mgmt_cp->type;
8175 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8176
8177 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8178 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8179 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8180 if (err < 0 || status)
8181 goto done;
8182
8183 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8184
8185 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8186 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8187 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8188done:
8189 if (skb && !IS_ERR(skb))
8190 kfree_skb(skb);
8191
8192 kfree(mgmt_rp);
8193 mgmt_pending_remove(cmd);
8194}
8195
8196static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8197 struct mgmt_cp_read_local_oob_ext_data *cp)
8198{
8199 struct mgmt_pending_cmd *cmd;
8200 int err;
8201
8202 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8203 cp, sizeof(*cp));
8204 if (!cmd)
8205 return -ENOMEM;
8206
8207 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8208 read_local_oob_ext_data_complete);
8209
8210 if (err < 0) {
8211 mgmt_pending_remove(cmd);
8212 return err;
8213 }
8214
8215 return 0;
8216}
8217
8218static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8219 void *data, u16 data_len)
8220{
8221 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8222 struct mgmt_rp_read_local_oob_ext_data *rp;
8223 size_t rp_len;
8224 u16 eir_len;
8225 u8 status, flags, role, addr[7], hash[16], rand[16];
8226 int err;
8227
8228 bt_dev_dbg(hdev, "sock %p", sk);
8229
8230 if (hdev_is_powered(hdev)) {
8231 switch (cp->type) {
8232 case BIT(BDADDR_BREDR):
8233 status = mgmt_bredr_support(hdev);
8234 if (status)
8235 eir_len = 0;
8236 else
8237 eir_len = 5;
8238 break;
8239 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8240 status = mgmt_le_support(hdev);
8241 if (status)
8242 eir_len = 0;
8243 else
8244 eir_len = 9 + 3 + 18 + 18 + 3;
8245 break;
8246 default:
8247 status = MGMT_STATUS_INVALID_PARAMS;
8248 eir_len = 0;
8249 break;
8250 }
8251 } else {
8252 status = MGMT_STATUS_NOT_POWERED;
8253 eir_len = 0;
8254 }
8255
8256 rp_len = sizeof(*rp) + eir_len;
8257 rp = kmalloc(rp_len, GFP_ATOMIC);
8258 if (!rp)
8259 return -ENOMEM;
8260
8261 if (!status && !lmp_ssp_capable(hdev)) {
8262 status = MGMT_STATUS_NOT_SUPPORTED;
8263 eir_len = 0;
8264 }
8265
8266 if (status)
8267 goto complete;
8268
8269 hci_dev_lock(hdev);
8270
8271 eir_len = 0;
8272 switch (cp->type) {
8273 case BIT(BDADDR_BREDR):
8274 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8275 err = read_local_ssp_oob_req(hdev, sk, cp);
8276 hci_dev_unlock(hdev);
8277 if (!err)
8278 goto done;
8279
8280 status = MGMT_STATUS_FAILED;
8281 goto complete;
8282 } else {
8283 eir_len = eir_append_data(rp->eir, eir_len,
8284 EIR_CLASS_OF_DEV,
8285 hdev->dev_class, 3);
8286 }
8287 break;
8288 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8289 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8290 smp_generate_oob(hdev, hash, rand) < 0) {
8291 hci_dev_unlock(hdev);
8292 status = MGMT_STATUS_FAILED;
8293 goto complete;
8294 }
8295
8296 /* This should return the active RPA, but since the RPA
8297 * is only programmed on demand, it is really hard to fill
8298 * this in at the moment. For now disallow retrieving
8299 * local out-of-band data when privacy is in use.
8300 *
8301 * Returning the identity address will not help here since
8302 * pairing happens before the identity resolving key is
8303 * known and thus the connection establishment happens
8304 * based on the RPA and not the identity address.
8305 */
8306 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8307 hci_dev_unlock(hdev);
8308 status = MGMT_STATUS_REJECTED;
8309 goto complete;
8310 }
8311
8312 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8313 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8314 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8315 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8316 memcpy(addr, &hdev->static_addr, 6);
8317 addr[6] = 0x01;
8318 } else {
8319 memcpy(addr, &hdev->bdaddr, 6);
8320 addr[6] = 0x00;
8321 }
8322
8323 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8324 addr, sizeof(addr));
8325
8326 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8327 role = 0x02;
8328 else
8329 role = 0x01;
8330
8331 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8332 &role, sizeof(role));
8333
8334 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8335 eir_len = eir_append_data(rp->eir, eir_len,
8336 EIR_LE_SC_CONFIRM,
8337 hash, sizeof(hash));
8338
8339 eir_len = eir_append_data(rp->eir, eir_len,
8340 EIR_LE_SC_RANDOM,
8341 rand, sizeof(rand));
8342 }
8343
8344 flags = mgmt_get_adv_discov_flags(hdev);
8345
8346 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8347 flags |= LE_AD_NO_BREDR;
8348
8349 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8350 &flags, sizeof(flags));
8351 break;
8352 }
8353
8354 hci_dev_unlock(hdev);
8355
8356 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8357
8358 status = MGMT_STATUS_SUCCESS;
8359
8360complete:
8361 rp->type = cp->type;
8362 rp->eir_len = cpu_to_le16(eir_len);
8363
8364 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8365 status, rp, sizeof(*rp) + eir_len);
8366 if (err < 0 || status)
8367 goto done;
8368
8369 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8370 rp, sizeof(*rp) + eir_len,
8371 HCI_MGMT_OOB_DATA_EVENTS, sk);
8372
8373done:
8374 kfree(rp);
8375
8376 return err;
8377}
8378
8379static u32 get_supported_adv_flags(struct hci_dev *hdev)
8380{
8381 u32 flags = 0;
8382
8383 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8384 flags |= MGMT_ADV_FLAG_DISCOV;
8385 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8386 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8387 flags |= MGMT_ADV_FLAG_APPEARANCE;
8388 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8389 flags |= MGMT_ADV_PARAM_DURATION;
8390 flags |= MGMT_ADV_PARAM_TIMEOUT;
8391 flags |= MGMT_ADV_PARAM_INTERVALS;
8392 flags |= MGMT_ADV_PARAM_TX_POWER;
8393 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8394
8395	/* With extended advertising, the TX_POWER returned from Set
8396	 * Extended Advertising Parameters is always valid.
8397 */
8398 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8399 flags |= MGMT_ADV_FLAG_TX_POWER;
8400
8401 if (ext_adv_capable(hdev)) {
8402 flags |= MGMT_ADV_FLAG_SEC_1M;
8403 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8404 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8405
8406 if (le_2m_capable(hdev))
8407 flags |= MGMT_ADV_FLAG_SEC_2M;
8408
8409 if (le_coded_capable(hdev))
8410 flags |= MGMT_ADV_FLAG_SEC_CODED;
8411 }
8412
8413 return flags;
8414}
8415
8416static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8417 void *data, u16 data_len)
8418{
8419 struct mgmt_rp_read_adv_features *rp;
8420 size_t rp_len;
8421 int err;
8422 struct adv_info *adv_instance;
8423 u32 supported_flags;
8424 u8 *instance;
8425
8426 bt_dev_dbg(hdev, "sock %p", sk);
8427
8428 if (!lmp_le_capable(hdev))
8429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8430 MGMT_STATUS_REJECTED);
8431
8432 hci_dev_lock(hdev);
8433
8434 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8435 rp = kmalloc(rp_len, GFP_ATOMIC);
8436 if (!rp) {
8437 hci_dev_unlock(hdev);
8438 return -ENOMEM;
8439 }
8440
8441 supported_flags = get_supported_adv_flags(hdev);
8442
8443 rp->supported_flags = cpu_to_le32(supported_flags);
8444 rp->max_adv_data_len = max_adv_len(hdev);
8445 rp->max_scan_rsp_len = max_adv_len(hdev);
8446 rp->max_instances = hdev->le_num_of_adv_sets;
8447 rp->num_instances = hdev->adv_instance_cnt;
8448
8449 instance = rp->instance;
8450 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8451 /* Only instances 1-le_num_of_adv_sets are externally visible */
8452 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8453 *instance = adv_instance->instance;
8454 instance++;
8455 } else {
8456 rp->num_instances--;
8457 rp_len--;
8458 }
8459 }
8460
8461 hci_dev_unlock(hdev);
8462
8463 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8464 MGMT_STATUS_SUCCESS, rp, rp_len);
8465
8466 kfree(rp);
8467
8468 return err;
8469}
8470
8471static u8 calculate_name_len(struct hci_dev *hdev)
8472{
8473 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8474
8475 return eir_append_local_name(hdev, buf, 0);
8476}
8477
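/* Worked example of the budget computed below, assuming legacy advertising
 * (31-byte payload): with MGMT_ADV_FLAG_DISCOV and MGMT_ADV_FLAG_TX_POWER
 * set, 3 bytes are reserved for the kernel-managed Flags field and 3 bytes
 * for the TX Power field, leaving 31 - 3 - 3 = 25 bytes for
 * caller-supplied advertising data.
 */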
8478static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8479 bool is_adv_data)
8480{
8481 u8 max_len = max_adv_len(hdev);
8482
8483 if (is_adv_data) {
8484 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8485 MGMT_ADV_FLAG_LIMITED_DISCOV |
8486 MGMT_ADV_FLAG_MANAGED_FLAGS))
8487 max_len -= 3;
8488
8489 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8490 max_len -= 3;
8491 } else {
8492 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8493 max_len -= calculate_name_len(hdev);
8494
8495 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8496 max_len -= 4;
8497 }
8498
8499 return max_len;
8500}
8501
8502static bool flags_managed(u32 adv_flags)
8503{
8504 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8505 MGMT_ADV_FLAG_LIMITED_DISCOV |
8506 MGMT_ADV_FLAG_MANAGED_FLAGS);
8507}
8508
8509static bool tx_power_managed(u32 adv_flags)
8510{
8511 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8512}
8513
8514static bool name_managed(u32 adv_flags)
8515{
8516 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8517}
8518
8519static bool appearance_managed(u32 adv_flags)
8520{
8521 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8522}
8523
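/* Advertising and scan response payloads are sequences of AD structures:
 * a length octet followed by a type octet and (length - 1) data octets.
 * For instance, the three bytes 0x02 0x01 0x06 encode a Flags field
 * (type 0x01) carrying the single value 0x06. The validator below walks
 * that framing and rejects types that the kernel manages itself.
 */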
8524static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8525 u8 len, bool is_adv_data)
8526{
8527 int i, cur_len;
8528 u8 max_len;
8529
8530 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8531
8532 if (len > max_len)
8533 return false;
8534
8535 /* Make sure that the data is correctly formatted. */
8536 for (i = 0; i < len; i += (cur_len + 1)) {
8537 cur_len = data[i];
8538
8539 if (!cur_len)
8540 continue;
8541
8542 if (data[i + 1] == EIR_FLAGS &&
8543 (!is_adv_data || flags_managed(adv_flags)))
8544 return false;
8545
8546 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8547 return false;
8548
8549 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8550 return false;
8551
8552 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8553 return false;
8554
8555 if (data[i + 1] == EIR_APPEARANCE &&
8556 appearance_managed(adv_flags))
8557 return false;
8558
8559 /* If the current field length would exceed the total data
8560 * length, then it's invalid.
8561 */
8562 if (i + cur_len >= len)
8563 return false;
8564 }
8565
8566 return true;
8567}
8568
8569static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8570{
8571 u32 supported_flags, phy_flags;
8572
8573 /* The current implementation only supports a subset of the specified
8574 * flags. Also need to check mutual exclusiveness of sec flags.
8575 */
8576 supported_flags = get_supported_adv_flags(hdev);
8577 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
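	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * below is non-zero exactly when more than one secondary-PHY flag
	 * is set, which is the combination that must be rejected.
	 */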
8578 if (adv_flags & ~supported_flags ||
8579 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8580 return false;
8581
8582 return true;
8583}
8584
8585static bool adv_busy(struct hci_dev *hdev)
8586{
8587 return pending_find(MGMT_OP_SET_LE, hdev);
8588}
8589
8590static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8591 int err)
8592{
8593 struct adv_info *adv, *n;
8594
8595 bt_dev_dbg(hdev, "err %d", err);
8596
8597 hci_dev_lock(hdev);
8598
8599 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8600 u8 instance;
8601
8602 if (!adv->pending)
8603 continue;
8604
8605 if (!err) {
8606 adv->pending = false;
8607 continue;
8608 }
8609
8610 instance = adv->instance;
8611
8612 if (hdev->cur_adv_instance == instance)
8613 cancel_adv_timeout(hdev);
8614
8615 hci_remove_adv_instance(hdev, instance);
8616 mgmt_advertising_removed(sk, hdev, instance);
8617 }
8618
8619 hci_dev_unlock(hdev);
8620}
8621
8622static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8623{
8624 struct mgmt_pending_cmd *cmd = data;
8625 struct mgmt_cp_add_advertising *cp = cmd->param;
8626 struct mgmt_rp_add_advertising rp;
8627
8628 memset(&rp, 0, sizeof(rp));
8629
8630 rp.instance = cp->instance;
8631
8632 if (err)
8633 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8634 mgmt_status(err));
8635 else
8636 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8637 mgmt_status(err), &rp, sizeof(rp));
8638
8639 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8640
8641 mgmt_pending_free(cmd);
8642}
8643
8644static int add_advertising_sync(struct hci_dev *hdev, void *data)
8645{
8646 struct mgmt_pending_cmd *cmd = data;
8647 struct mgmt_cp_add_advertising *cp = cmd->param;
8648
8649 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8650}
8651
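/* Add Advertising is a variable-length command: the fixed header is
 * followed by adv_data_len bytes of advertising data and then
 * scan_rsp_len bytes of scan response data, back to back. Illustrative
 * sizing for a request carrying the 3-byte Flags example above and no
 * scan response:
 *
 *	cp->adv_data_len = 3;
 *	cp->scan_rsp_len = 0;
 *	data_len = sizeof(*cp) + 3;	// must match exactly, see below
 */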
8652static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8653 void *data, u16 data_len)
8654{
8655 struct mgmt_cp_add_advertising *cp = data;
8656 struct mgmt_rp_add_advertising rp;
8657 u32 flags;
8658 u8 status;
8659 u16 timeout, duration;
8660 unsigned int prev_instance_cnt;
8661 u8 schedule_instance = 0;
8662 struct adv_info *adv, *next_instance;
8663 int err;
8664 struct mgmt_pending_cmd *cmd;
8665
8666 bt_dev_dbg(hdev, "sock %p", sk);
8667
8668 status = mgmt_le_support(hdev);
8669 if (status)
8670 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8671 status);
8672
8673 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8674 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8675 MGMT_STATUS_INVALID_PARAMS);
8676
8677 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8678 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8679 MGMT_STATUS_INVALID_PARAMS);
8680
8681 flags = __le32_to_cpu(cp->flags);
8682 timeout = __le16_to_cpu(cp->timeout);
8683 duration = __le16_to_cpu(cp->duration);
8684
8685 if (!requested_adv_flags_are_valid(hdev, flags))
8686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8687 MGMT_STATUS_INVALID_PARAMS);
8688
8689 hci_dev_lock(hdev);
8690
8691 if (timeout && !hdev_is_powered(hdev)) {
8692 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8693 MGMT_STATUS_REJECTED);
8694 goto unlock;
8695 }
8696
8697 if (adv_busy(hdev)) {
8698 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8699 MGMT_STATUS_BUSY);
8700 goto unlock;
8701 }
8702
8703 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8704 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8705 cp->scan_rsp_len, false)) {
8706 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8707 MGMT_STATUS_INVALID_PARAMS);
8708 goto unlock;
8709 }
8710
8711 prev_instance_cnt = hdev->adv_instance_cnt;
8712
8713 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8714 cp->adv_data_len, cp->data,
8715 cp->scan_rsp_len,
8716 cp->data + cp->adv_data_len,
8717 timeout, duration,
8718 HCI_ADV_TX_POWER_NO_PREFERENCE,
8719 hdev->le_adv_min_interval,
8720 hdev->le_adv_max_interval, 0);
8721 if (IS_ERR(adv)) {
8722 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8723 MGMT_STATUS_FAILED);
8724 goto unlock;
8725 }
8726
8727 /* Only trigger an advertising added event if a new instance was
8728 * actually added.
8729 */
8730 if (hdev->adv_instance_cnt > prev_instance_cnt)
8731 mgmt_advertising_added(sk, hdev, cp->instance);
8732
8733 if (hdev->cur_adv_instance == cp->instance) {
8734 /* If the currently advertised instance is being changed then
8735 * cancel the current advertising and schedule the next
8736 * instance. If there is only one instance then the overridden
8737 * advertising data will be visible right away.
8738 */
8739 cancel_adv_timeout(hdev);
8740
8741 next_instance = hci_get_next_instance(hdev, cp->instance);
8742 if (next_instance)
8743 schedule_instance = next_instance->instance;
8744 } else if (!hdev->adv_instance_timeout) {
8745 /* Immediately advertise the new instance if no other
8746 * instance is currently being advertised.
8747 */
8748 schedule_instance = cp->instance;
8749 }
8750
8751 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8752 * there is no instance to be advertised then we have no HCI
8753 * communication to make. Simply return.
8754 */
8755 if (!hdev_is_powered(hdev) ||
8756 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8757 !schedule_instance) {
8758 rp.instance = cp->instance;
8759 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8760 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8761 goto unlock;
8762 }
8763
8764 /* We're good to go, update advertising data, parameters, and start
8765 * advertising.
8766 */
8767 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8768 data_len);
8769 if (!cmd) {
8770 err = -ENOMEM;
8771 goto unlock;
8772 }
8773
8774 cp->instance = schedule_instance;
8775
8776 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8777 add_advertising_complete);
8778 if (err < 0)
8779 mgmt_pending_free(cmd);
8780
8781unlock:
8782 hci_dev_unlock(hdev);
8783
8784 return err;
8785}
8786
8787static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8788 int err)
8789{
8790 struct mgmt_pending_cmd *cmd = data;
8791 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8792 struct mgmt_rp_add_ext_adv_params rp;
8793 struct adv_info *adv;
8794 u32 flags;
8795
8796 BT_DBG("%s", hdev->name);
8797
8798 hci_dev_lock(hdev);
8799
8800 adv = hci_find_adv_instance(hdev, cp->instance);
8801 if (!adv)
8802 goto unlock;
8803
8804 rp.instance = cp->instance;
8805 rp.tx_power = adv->tx_power;
8806
8807 /* While we're at it, inform userspace of the available space for this
8808 * advertisement, given the flags that will be used.
8809 */
8810 flags = __le32_to_cpu(cp->flags);
8811 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8812 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8813
8814 if (err) {
8815 /* If this advertisement was previously advertising and we
8816 * failed to update it, we signal that it has been removed and
8817		 * delete its structure.
8818 */
8819 if (!adv->pending)
8820 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8821
8822 hci_remove_adv_instance(hdev, cp->instance);
8823
8824 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8825 mgmt_status(err));
8826 } else {
8827 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8828 mgmt_status(err), &rp, sizeof(rp));
8829 }
8830
8831unlock:
8832 if (cmd)
8833 mgmt_pending_free(cmd);
8834
8835 hci_dev_unlock(hdev);
8836}
8837
8838static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8839{
8840 struct mgmt_pending_cmd *cmd = data;
8841 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8842
8843 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8844}
8845
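/* Extended advertising registration is split in two: Add Extended
 * Advertising Parameters creates the instance (this handler), and Add
 * Extended Advertising Data then supplies its payload. A client is
 * expected to issue them in that order for the same instance, i.e.
 * MGMT_OP_ADD_EXT_ADV_PARAMS followed by MGMT_OP_ADD_EXT_ADV_DATA,
 * reusing the instance number from the first reply.
 */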
8846static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8847 void *data, u16 data_len)
8848{
8849 struct mgmt_cp_add_ext_adv_params *cp = data;
8850 struct mgmt_rp_add_ext_adv_params rp;
8851 struct mgmt_pending_cmd *cmd = NULL;
8852 struct adv_info *adv;
8853 u32 flags, min_interval, max_interval;
8854 u16 timeout, duration;
8855 u8 status;
8856 s8 tx_power;
8857 int err;
8858
8859 BT_DBG("%s", hdev->name);
8860
8861 status = mgmt_le_support(hdev);
8862 if (status)
8863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8864 status);
8865
8866 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8868 MGMT_STATUS_INVALID_PARAMS);
8869
8870 /* The purpose of breaking add_advertising into two separate MGMT calls
8871 * for params and data is to allow more parameters to be added to this
8872 * structure in the future. For this reason, we verify that we have the
8873 * bare minimum structure we know of when the interface was defined. Any
8874 * extra parameters we don't know about will be ignored in this request.
8875 */
8876 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8877 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8878 MGMT_STATUS_INVALID_PARAMS);
8879
8880 flags = __le32_to_cpu(cp->flags);
8881
8882 if (!requested_adv_flags_are_valid(hdev, flags))
8883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8884 MGMT_STATUS_INVALID_PARAMS);
8885
8886 hci_dev_lock(hdev);
8887
8888	/* The new interface requires the controller to be powered before registering */
8889 if (!hdev_is_powered(hdev)) {
8890 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8891 MGMT_STATUS_REJECTED);
8892 goto unlock;
8893 }
8894
8895 if (adv_busy(hdev)) {
8896 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8897 MGMT_STATUS_BUSY);
8898 goto unlock;
8899 }
8900
8901 /* Parse defined parameters from request, use defaults otherwise */
8902 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8903 __le16_to_cpu(cp->timeout) : 0;
8904
8905 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8906 __le16_to_cpu(cp->duration) :
8907 hdev->def_multi_adv_rotation_duration;
8908
8909 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8910 __le32_to_cpu(cp->min_interval) :
8911 hdev->le_adv_min_interval;
8912
8913 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8914 __le32_to_cpu(cp->max_interval) :
8915 hdev->le_adv_max_interval;
8916
8917 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8918 cp->tx_power :
8919 HCI_ADV_TX_POWER_NO_PREFERENCE;
8920
8921 /* Create advertising instance with no advertising or response data */
8922 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8923 timeout, duration, tx_power, min_interval,
8924 max_interval, 0);
8925
8926 if (IS_ERR(adv)) {
8927 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8928 MGMT_STATUS_FAILED);
8929 goto unlock;
8930 }
8931
8932 /* Submit request for advertising params if ext adv available */
8933 if (ext_adv_capable(hdev)) {
8934 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8935 data, data_len);
8936 if (!cmd) {
8937 err = -ENOMEM;
8938 hci_remove_adv_instance(hdev, cp->instance);
8939 goto unlock;
8940 }
8941
8942 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8943 add_ext_adv_params_complete);
8944 if (err < 0)
8945 mgmt_pending_free(cmd);
8946 } else {
8947 rp.instance = cp->instance;
8948 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8949 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8950 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8951 err = mgmt_cmd_complete(sk, hdev->id,
8952 MGMT_OP_ADD_EXT_ADV_PARAMS,
8953 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8954 }
8955
8956unlock:
8957 hci_dev_unlock(hdev);
8958
8959 return err;
8960}
8961
8962static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8963{
8964 struct mgmt_pending_cmd *cmd = data;
8965 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8966 struct mgmt_rp_add_advertising rp;
8967
8968 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8969
8970 memset(&rp, 0, sizeof(rp));
8971
8972 rp.instance = cp->instance;
8973
8974 if (err)
8975 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8976 mgmt_status(err));
8977 else
8978 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8979 mgmt_status(err), &rp, sizeof(rp));
8980
8981 mgmt_pending_free(cmd);
8982}
8983
8984static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8985{
8986 struct mgmt_pending_cmd *cmd = data;
8987 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8988 int err;
8989
8990 if (ext_adv_capable(hdev)) {
8991 err = hci_update_adv_data_sync(hdev, cp->instance);
8992 if (err)
8993 return err;
8994
8995 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8996 if (err)
8997 return err;
8998
8999 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9000 }
9001
9002 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9003}
9004
9005static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9006 u16 data_len)
9007{
9008 struct mgmt_cp_add_ext_adv_data *cp = data;
9009 struct mgmt_rp_add_ext_adv_data rp;
9010 u8 schedule_instance = 0;
9011 struct adv_info *next_instance;
9012 struct adv_info *adv_instance;
9013 int err = 0;
9014 struct mgmt_pending_cmd *cmd;
9015
9016 BT_DBG("%s", hdev->name);
9017
9018 hci_dev_lock(hdev);
9019
9020 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9021
9022 if (!adv_instance) {
9023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9024 MGMT_STATUS_INVALID_PARAMS);
9025 goto unlock;
9026 }
9027
9028	/* The new interface requires the controller to be powered before registering */
9029 if (!hdev_is_powered(hdev)) {
9030 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9031 MGMT_STATUS_REJECTED);
9032 goto clear_new_instance;
9033 }
9034
9035 if (adv_busy(hdev)) {
9036 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9037 MGMT_STATUS_BUSY);
9038 goto clear_new_instance;
9039 }
9040
9041 /* Validate new data */
9042 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9043 cp->adv_data_len, true) ||
9044 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9045 cp->adv_data_len, cp->scan_rsp_len, false)) {
9046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9047 MGMT_STATUS_INVALID_PARAMS);
9048 goto clear_new_instance;
9049 }
9050
9051 /* Set the data in the advertising instance */
9052 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9053 cp->data, cp->scan_rsp_len,
9054 cp->data + cp->adv_data_len);
9055
9056 /* If using software rotation, determine next instance to use */
9057 if (hdev->cur_adv_instance == cp->instance) {
9058 /* If the currently advertised instance is being changed
9059 * then cancel the current advertising and schedule the
9060 * next instance. If there is only one instance then the
9061 * overridden advertising data will be visible right
9062		 * away.
9063 */
9064 cancel_adv_timeout(hdev);
9065
9066 next_instance = hci_get_next_instance(hdev, cp->instance);
9067 if (next_instance)
9068 schedule_instance = next_instance->instance;
9069 } else if (!hdev->adv_instance_timeout) {
9070 /* Immediately advertise the new instance if no other
9071 * instance is currently being advertised.
9072 */
9073 schedule_instance = cp->instance;
9074 }
9075
9076 /* If the HCI_ADVERTISING flag is set or there is no instance to
9077 * be advertised then we have no HCI communication to make.
9078 * Simply return.
9079 */
9080 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9081 if (adv_instance->pending) {
9082 mgmt_advertising_added(sk, hdev, cp->instance);
9083 adv_instance->pending = false;
9084 }
9085 rp.instance = cp->instance;
9086 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9087 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9088 goto unlock;
9089 }
9090
9091 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9092 data_len);
9093 if (!cmd) {
9094 err = -ENOMEM;
9095 goto clear_new_instance;
9096 }
9097
9098 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9099 add_ext_adv_data_complete);
9100 if (err < 0) {
9101 mgmt_pending_free(cmd);
9102 goto clear_new_instance;
9103 }
9104
9105 /* We were successful in updating data, so trigger advertising_added
9106 * event if this is an instance that wasn't previously advertising. If
9107 * a failure occurs in the requests we initiated, we will remove the
9108	 * instance again in add_ext_adv_data_complete.
9109 */
9110 if (adv_instance->pending)
9111 mgmt_advertising_added(sk, hdev, cp->instance);
9112
9113 goto unlock;
9114
9115clear_new_instance:
9116 hci_remove_adv_instance(hdev, cp->instance);
9117
9118unlock:
9119 hci_dev_unlock(hdev);
9120
9121 return err;
9122}
9123
9124static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9125 int err)
9126{
9127 struct mgmt_pending_cmd *cmd = data;
9128 struct mgmt_cp_remove_advertising *cp = cmd->param;
9129 struct mgmt_rp_remove_advertising rp;
9130
9131 bt_dev_dbg(hdev, "err %d", err);
9132
9133 memset(&rp, 0, sizeof(rp));
9134 rp.instance = cp->instance;
9135
9136 if (err)
9137 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9138 mgmt_status(err));
9139 else
9140 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9141 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9142
9143 mgmt_pending_free(cmd);
9144}
9145
9146static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9147{
9148 struct mgmt_pending_cmd *cmd = data;
9149 struct mgmt_cp_remove_advertising *cp = cmd->param;
9150 int err;
9151
9152 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9153 if (err)
9154 return err;
9155
9156 if (list_empty(&hdev->adv_instances))
9157 err = hci_disable_advertising_sync(hdev);
9158
9159 return err;
9160}
9161
9162static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9163 void *data, u16 data_len)
9164{
9165 struct mgmt_cp_remove_advertising *cp = data;
9166 struct mgmt_pending_cmd *cmd;
9167 int err;
9168
9169 bt_dev_dbg(hdev, "sock %p", sk);
9170
9171 hci_dev_lock(hdev);
9172
9173 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9174 err = mgmt_cmd_status(sk, hdev->id,
9175 MGMT_OP_REMOVE_ADVERTISING,
9176 MGMT_STATUS_INVALID_PARAMS);
9177 goto unlock;
9178 }
9179
9180 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9181 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9182 MGMT_STATUS_BUSY);
9183 goto unlock;
9184 }
9185
9186 if (list_empty(&hdev->adv_instances)) {
9187 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9188 MGMT_STATUS_INVALID_PARAMS);
9189 goto unlock;
9190 }
9191
9192 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9193 data_len);
9194 if (!cmd) {
9195 err = -ENOMEM;
9196 goto unlock;
9197 }
9198
9199 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9200 remove_advertising_complete);
9201 if (err < 0)
9202 mgmt_pending_free(cmd);
9203
9204unlock:
9205 hci_dev_unlock(hdev);
9206
9207 return err;
9208}
9209
9210static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9211 void *data, u16 data_len)
9212{
9213 struct mgmt_cp_get_adv_size_info *cp = data;
9214 struct mgmt_rp_get_adv_size_info rp;
9215 u32 flags, supported_flags;
9216
9217 bt_dev_dbg(hdev, "sock %p", sk);
9218
9219 if (!lmp_le_capable(hdev))
9220 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9221 MGMT_STATUS_REJECTED);
9222
9223 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9224 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9225 MGMT_STATUS_INVALID_PARAMS);
9226
9227 flags = __le32_to_cpu(cp->flags);
9228
9229 /* The current implementation only supports a subset of the specified
9230 * flags.
9231 */
9232 supported_flags = get_supported_adv_flags(hdev);
9233 if (flags & ~supported_flags)
9234 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9235 MGMT_STATUS_INVALID_PARAMS);
9236
9237 rp.instance = cp->instance;
9238 rp.flags = cp->flags;
9239 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9240 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9241
9242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9243 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9244}
9245
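/* This table is indexed by management opcode: entry N services opcode N,
 * which is why slot 0x0000 is a NULL placeholder and the order must track
 * the MGMT_OP_* numbering exactly. The second field is the expected
 * parameter length (the minimum length for HCI_MGMT_VAR_LEN entries).
 */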
9246static const struct hci_mgmt_handler mgmt_handlers[] = {
9247 { NULL }, /* 0x0000 (no command) */
9248 { read_version, MGMT_READ_VERSION_SIZE,
9249 HCI_MGMT_NO_HDEV |
9250 HCI_MGMT_UNTRUSTED },
9251 { read_commands, MGMT_READ_COMMANDS_SIZE,
9252 HCI_MGMT_NO_HDEV |
9253 HCI_MGMT_UNTRUSTED },
9254 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9255 HCI_MGMT_NO_HDEV |
9256 HCI_MGMT_UNTRUSTED },
9257 { read_controller_info, MGMT_READ_INFO_SIZE,
9258 HCI_MGMT_UNTRUSTED },
9259 { set_powered, MGMT_SETTING_SIZE },
9260 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9261 { set_connectable, MGMT_SETTING_SIZE },
9262 { set_fast_connectable, MGMT_SETTING_SIZE },
9263 { set_bondable, MGMT_SETTING_SIZE },
9264 { set_link_security, MGMT_SETTING_SIZE },
9265 { set_ssp, MGMT_SETTING_SIZE },
9266 { set_hs, MGMT_SETTING_SIZE },
9267 { set_le, MGMT_SETTING_SIZE },
9268 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9269 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9270 { add_uuid, MGMT_ADD_UUID_SIZE },
9271 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9272 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9273 HCI_MGMT_VAR_LEN },
9274 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9275 HCI_MGMT_VAR_LEN },
9276 { disconnect, MGMT_DISCONNECT_SIZE },
9277 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9278 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9279 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9280 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9281 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9282 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9283 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9284 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9285 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9286 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9287 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9288 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9289 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9290 HCI_MGMT_VAR_LEN },
9291 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9292 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9293 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9294 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9295 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9296 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9297 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9298 { set_advertising, MGMT_SETTING_SIZE },
9299 { set_bredr, MGMT_SETTING_SIZE },
9300 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9301 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9302 { set_secure_conn, MGMT_SETTING_SIZE },
9303 { set_debug_keys, MGMT_SETTING_SIZE },
9304 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9305 { load_irks, MGMT_LOAD_IRKS_SIZE,
9306 HCI_MGMT_VAR_LEN },
9307 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9308 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9309 { add_device, MGMT_ADD_DEVICE_SIZE },
9310 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9311 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9312 HCI_MGMT_VAR_LEN },
9313 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9314 HCI_MGMT_NO_HDEV |
9315 HCI_MGMT_UNTRUSTED },
9316 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9317 HCI_MGMT_UNCONFIGURED |
9318 HCI_MGMT_UNTRUSTED },
9319 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9320 HCI_MGMT_UNCONFIGURED },
9321 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9322 HCI_MGMT_UNCONFIGURED },
9323 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9324 HCI_MGMT_VAR_LEN },
9325 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9326 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9327 HCI_MGMT_NO_HDEV |
9328 HCI_MGMT_UNTRUSTED },
9329 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9330 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9331 HCI_MGMT_VAR_LEN },
9332 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9333 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9334 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9335 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9336 HCI_MGMT_UNTRUSTED },
9337 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9338 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9339 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9340 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9341 HCI_MGMT_VAR_LEN },
9342 { set_wideband_speech, MGMT_SETTING_SIZE },
9343 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9344 HCI_MGMT_UNTRUSTED },
9345 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9346 HCI_MGMT_UNTRUSTED |
9347 HCI_MGMT_HDEV_OPTIONAL },
9348 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9349 HCI_MGMT_VAR_LEN |
9350 HCI_MGMT_HDEV_OPTIONAL },
9351 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9352 HCI_MGMT_UNTRUSTED },
9353 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9354 HCI_MGMT_VAR_LEN },
9355 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9356 HCI_MGMT_UNTRUSTED },
9357 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9358 HCI_MGMT_VAR_LEN },
9359 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9360 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9361 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9362 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9363 HCI_MGMT_VAR_LEN },
9364 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9365 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9366 HCI_MGMT_VAR_LEN },
9367 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9368 HCI_MGMT_VAR_LEN },
9369 { add_adv_patterns_monitor_rssi,
9370 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9371 HCI_MGMT_VAR_LEN },
9372 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9373 HCI_MGMT_VAR_LEN },
9374 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9375 { mesh_send, MGMT_MESH_SEND_SIZE,
9376 HCI_MGMT_VAR_LEN },
9377 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9378};
9379
9380void mgmt_index_added(struct hci_dev *hdev)
9381{
9382 struct mgmt_ev_ext_index ev;
9383
9384 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9385 return;
9386
9387 switch (hdev->dev_type) {
9388 case HCI_PRIMARY:
9389 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9390 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9391 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9392 ev.type = 0x01;
9393 } else {
9394 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9395 HCI_MGMT_INDEX_EVENTS);
9396 ev.type = 0x00;
9397 }
9398 break;
9399 case HCI_AMP:
9400 ev.type = 0x02;
9401 break;
9402 default:
9403 return;
9404 }
9405
9406 ev.bus = hdev->bus;
9407
9408 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9409 HCI_MGMT_EXT_INDEX_EVENTS);
9410}
9411
9412void mgmt_index_removed(struct hci_dev *hdev)
9413{
9414 struct mgmt_ev_ext_index ev;
9415 u8 status = MGMT_STATUS_INVALID_INDEX;
9416
9417 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9418 return;
9419
9420 switch (hdev->dev_type) {
9421 case HCI_PRIMARY:
9422 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9423
9424 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9425 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9426 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9427 ev.type = 0x01;
9428 } else {
9429 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9430 HCI_MGMT_INDEX_EVENTS);
9431 ev.type = 0x00;
9432 }
9433 break;
9434 case HCI_AMP:
9435 ev.type = 0x02;
9436 break;
9437 default:
9438 return;
9439 }
9440
9441 ev.bus = hdev->bus;
9442
9443 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9444 HCI_MGMT_EXT_INDEX_EVENTS);
9445
9446 /* Cancel any remaining timed work */
9447 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9448 return;
9449 cancel_delayed_work_sync(&hdev->discov_off);
9450 cancel_delayed_work_sync(&hdev->service_cache);
9451 cancel_delayed_work_sync(&hdev->rpa_expired);
9452}
9453
9454void mgmt_power_on(struct hci_dev *hdev, int err)
9455{
9456 struct cmd_lookup match = { NULL, hdev };
9457
9458 bt_dev_dbg(hdev, "err %d", err);
9459
9460 hci_dev_lock(hdev);
9461
9462 if (!err) {
9463 restart_le_actions(hdev);
9464 hci_update_passive_scan(hdev);
9465 }
9466
9467 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9468
9469 new_settings(hdev, match.sk);
9470
9471 if (match.sk)
9472 sock_put(match.sk);
9473
9474 hci_dev_unlock(hdev);
9475}
9476
9477void __mgmt_power_off(struct hci_dev *hdev)
9478{
9479 struct cmd_lookup match = { NULL, hdev };
9480 u8 status, zero_cod[] = { 0, 0, 0 };
9481
9482 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9483
9484	/* If the power off is because of hdev unregistration let us
9485	 * use the appropriate INVALID_INDEX status. Otherwise use
9486 * NOT_POWERED. We cover both scenarios here since later in
9487 * mgmt_index_removed() any hci_conn callbacks will have already
9488 * been triggered, potentially causing misleading DISCONNECTED
9489 * status responses.
9490 */
9491 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9492 status = MGMT_STATUS_INVALID_INDEX;
9493 else
9494 status = MGMT_STATUS_NOT_POWERED;
9495
9496 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9497
9498 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9499 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9500 zero_cod, sizeof(zero_cod),
9501 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9502 ext_info_changed(hdev, NULL);
9503 }
9504
9505 new_settings(hdev, match.sk);
9506
9507 if (match.sk)
9508 sock_put(match.sk);
9509}
9510
9511void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9512{
9513 struct mgmt_pending_cmd *cmd;
9514 u8 status;
9515
9516 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9517 if (!cmd)
9518 return;
9519
9520 if (err == -ERFKILL)
9521 status = MGMT_STATUS_RFKILLED;
9522 else
9523 status = MGMT_STATUS_FAILED;
9524
9525 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9526
9527 mgmt_pending_remove(cmd);
9528}
9529
9530void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9531 bool persistent)
9532{
9533 struct mgmt_ev_new_link_key ev;
9534
9535 memset(&ev, 0, sizeof(ev));
9536
9537 ev.store_hint = persistent;
9538 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9539 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9540 ev.key.type = key->type;
9541 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9542 ev.key.pin_len = key->pin_len;
9543
9544 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9545}
9546
9547static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9548{
9549 switch (ltk->type) {
9550 case SMP_LTK:
9551 case SMP_LTK_RESPONDER:
9552 if (ltk->authenticated)
9553 return MGMT_LTK_AUTHENTICATED;
9554 return MGMT_LTK_UNAUTHENTICATED;
9555 case SMP_LTK_P256:
9556 if (ltk->authenticated)
9557 return MGMT_LTK_P256_AUTH;
9558 return MGMT_LTK_P256_UNAUTH;
9559 case SMP_LTK_P256_DEBUG:
9560 return MGMT_LTK_P256_DEBUG;
9561 }
9562
9563 return MGMT_LTK_UNAUTHENTICATED;
9564}
9565
9566void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9567{
9568 struct mgmt_ev_new_long_term_key ev;
9569
9570 memset(&ev, 0, sizeof(ev));
9571
9572 /* Devices using resolvable or non-resolvable random addresses
9573 * without providing an identity resolving key don't require
9574	 * storing long term keys. Their addresses will change the
9575 * next time around.
9576 *
9577 * Only when a remote device provides an identity address
9578 * make sure the long term key is stored. If the remote
9579 * identity is known, the long term keys are internally
9580 * mapped to the identity address. So allow static random
9581 * and public addresses here.
9582 */
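	/* A static random address has its two most significant bits set
	 * (0b11 in bits 7:6 of the last octet), so this test matches only
	 * resolvable and non-resolvable private addresses.
	 */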
9583 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9584 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9585 ev.store_hint = 0x00;
9586 else
9587 ev.store_hint = persistent;
9588
9589 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9590 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9591 ev.key.type = mgmt_ltk_type(key);
9592 ev.key.enc_size = key->enc_size;
9593 ev.key.ediv = key->ediv;
9594 ev.key.rand = key->rand;
9595
9596 if (key->type == SMP_LTK)
9597 ev.key.initiator = 1;
9598
9599 /* Make sure we copy only the significant bytes based on the
9600 * encryption key size, and set the rest of the value to zeroes.
9601 */
9602 memcpy(ev.key.val, key->val, key->enc_size);
9603 memset(ev.key.val + key->enc_size, 0,
9604 sizeof(ev.key.val) - key->enc_size);
9605
9606 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9607}
9608
9609void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9610{
9611 struct mgmt_ev_new_irk ev;
9612
9613 memset(&ev, 0, sizeof(ev));
9614
9615 ev.store_hint = persistent;
9616
9617 bacpy(&ev.rpa, &irk->rpa);
9618 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9619 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9620 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9621
9622 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9623}
9624
9625void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9626 bool persistent)
9627{
9628 struct mgmt_ev_new_csrk ev;
9629
9630 memset(&ev, 0, sizeof(ev));
9631
9632 /* Devices using resolvable or non-resolvable random addresses
9633 * without providing an identity resolving key don't require
9634	 * storing signature resolving keys. Their addresses will change
9635 * the next time around.
9636 *
9637 * Only when a remote device provides an identity address
9638 * make sure the signature resolving key is stored. So allow
9639 * static random and public addresses here.
9640 */
9641 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9642 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9643 ev.store_hint = 0x00;
9644 else
9645 ev.store_hint = persistent;
9646
9647 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9648 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9649 ev.key.type = csrk->type;
9650 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9651
9652 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9653}
9654
9655void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9656 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9657 u16 max_interval, u16 latency, u16 timeout)
9658{
9659 struct mgmt_ev_new_conn_param ev;
9660
9661 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9662 return;
9663
9664 memset(&ev, 0, sizeof(ev));
9665 bacpy(&ev.addr.bdaddr, bdaddr);
9666 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9667 ev.store_hint = store_hint;
9668 ev.min_interval = cpu_to_le16(min_interval);
9669 ev.max_interval = cpu_to_le16(max_interval);
9670 ev.latency = cpu_to_le16(latency);
9671 ev.timeout = cpu_to_le16(timeout);
9672
9673 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9674}
9675
9676void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9677 u8 *name, u8 name_len)
9678{
9679 struct sk_buff *skb;
9680 struct mgmt_ev_device_connected *ev;
9681 u16 eir_len = 0;
9682 u32 flags = 0;
9683
9684	/* Allocate the event buffer for LE adv data or BR/EDR EIR fields */
9685 if (conn->le_adv_data_len > 0)
9686 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9687 sizeof(*ev) + conn->le_adv_data_len);
9688 else
9689 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9690 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9691 eir_precalc_len(sizeof(conn->dev_class)));
9692
9693 ev = skb_put(skb, sizeof(*ev));
9694 bacpy(&ev->addr.bdaddr, &conn->dst);
9695 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9696
9697 if (conn->out)
9698 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9699
9700 ev->flags = __cpu_to_le32(flags);
9701
9702 /* We must ensure that the EIR Data fields are ordered and
9703 * unique. Keep it simple for now and avoid the problem by not
9704 * adding any BR/EDR data to the LE adv.
9705 */
9706 if (conn->le_adv_data_len > 0) {
9707 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9708 eir_len = conn->le_adv_data_len;
9709 } else {
9710 if (name)
9711 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9712
9713 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9714 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9715 conn->dev_class, sizeof(conn->dev_class));
9716 }
9717
9718 ev->eir_len = cpu_to_le16(eir_len);
9719
9720 mgmt_event_skb(skb, NULL);
9721}
9722
9723static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9724{
9725 struct sock **sk = data;
9726
9727 cmd->cmd_complete(cmd, 0);
9728
9729 *sk = cmd->sk;
9730 sock_hold(*sk);
9731
9732 mgmt_pending_remove(cmd);
9733}
9734
9735static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9736{
9737 struct hci_dev *hdev = data;
9738 struct mgmt_cp_unpair_device *cp = cmd->param;
9739
9740 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9741
9742 cmd->cmd_complete(cmd, 0);
9743 mgmt_pending_remove(cmd);
9744}
9745
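/* Returns true when a Set Powered (off) command is still pending, i.e.
 * the controller is in the middle of an mgmt-initiated power down.
 */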
9746bool mgmt_powering_down(struct hci_dev *hdev)
9747{
9748 struct mgmt_pending_cmd *cmd;
9749 struct mgmt_mode *cp;
9750
9751 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9752 if (!cmd)
9753 return false;
9754
9755 cp = cmd->param;
9756 if (!cp->val)
9757 return true;
9758
9759 return false;
9760}
9761
9762void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9763 u8 link_type, u8 addr_type, u8 reason,
9764 bool mgmt_connected)
9765{
9766 struct mgmt_ev_device_disconnected ev;
9767 struct sock *sk = NULL;
9768
9769 /* The connection is still in hci_conn_hash so test for 1
9770 * instead of 0 to know if this is the last one.
9771 */
9772 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9773 cancel_delayed_work(&hdev->power_off);
9774 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9775 }
9776
9777 if (!mgmt_connected)
9778 return;
9779
9780 if (link_type != ACL_LINK && link_type != LE_LINK)
9781 return;
9782
9783 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9784
9785 bacpy(&ev.addr.bdaddr, bdaddr);
9786 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9787 ev.reason = reason;
9788
9789 /* Report disconnects due to suspend */
9790 if (hdev->suspended)
9791 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9792
9793 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9794
9795 if (sk)
9796 sock_put(sk);
9797
9798 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9799 hdev);
9800}
9801
9802void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9803 u8 link_type, u8 addr_type, u8 status)
9804{
9805 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9806 struct mgmt_cp_disconnect *cp;
9807 struct mgmt_pending_cmd *cmd;
9808
9809 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9810 hdev);
9811
9812 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9813 if (!cmd)
9814 return;
9815
9816 cp = cmd->param;
9817
9818 if (bacmp(bdaddr, &cp->addr.bdaddr))
9819 return;
9820
9821 if (cp->addr.type != bdaddr_type)
9822 return;
9823
9824 cmd->cmd_complete(cmd, mgmt_status(status));
9825 mgmt_pending_remove(cmd);
9826}
9827
9828void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9829 u8 addr_type, u8 status)
9830{
9831 struct mgmt_ev_connect_failed ev;
9832
9833 /* The connection is still in hci_conn_hash so test for 1
9834 * instead of 0 to know if this is the last one.
9835 */
9836 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9837 cancel_delayed_work(&hdev->power_off);
9838 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9839 }
9840
9841 bacpy(&ev.addr.bdaddr, bdaddr);
9842 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9843 ev.status = mgmt_status(status);
9844
9845 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9846}
9847
9848void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9849{
9850 struct mgmt_ev_pin_code_request ev;
9851
9852 bacpy(&ev.addr.bdaddr, bdaddr);
9853 ev.addr.type = BDADDR_BREDR;
9854 ev.secure = secure;
9855
9856 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9857}
9858
9859void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9860 u8 status)
9861{
9862 struct mgmt_pending_cmd *cmd;
9863
9864 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9865 if (!cmd)
9866 return;
9867
9868 cmd->cmd_complete(cmd, mgmt_status(status));
9869 mgmt_pending_remove(cmd);
9870}
9871
9872void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9873 u8 status)
9874{
9875 struct mgmt_pending_cmd *cmd;
9876
9877 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9878 if (!cmd)
9879 return;
9880
9881 cmd->cmd_complete(cmd, mgmt_status(status));
9882 mgmt_pending_remove(cmd);
9883}
9884
9885int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9886 u8 link_type, u8 addr_type, u32 value,
9887 u8 confirm_hint)
9888{
9889 struct mgmt_ev_user_confirm_request ev;
9890
9891 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9892
9893 bacpy(&ev.addr.bdaddr, bdaddr);
9894 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9895 ev.confirm_hint = confirm_hint;
9896 ev.value = cpu_to_le32(value);
9897
9898 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9899 NULL);
9900}
9901
9902int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9903 u8 link_type, u8 addr_type)
9904{
9905 struct mgmt_ev_user_passkey_request ev;
9906
9907 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9908
9909 bacpy(&ev.addr.bdaddr, bdaddr);
9910 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9911
9912 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9913 NULL);
9914}
9915
9916static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9917 u8 link_type, u8 addr_type, u8 status,
9918 u8 opcode)
9919{
9920 struct mgmt_pending_cmd *cmd;
9921
9922 cmd = pending_find(opcode, hdev);
9923 if (!cmd)
9924 return -ENOENT;
9925
9926 cmd->cmd_complete(cmd, mgmt_status(status));
9927 mgmt_pending_remove(cmd);
9928
9929 return 0;
9930}
9931
9932int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9933 u8 link_type, u8 addr_type, u8 status)
9934{
9935 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9936 status, MGMT_OP_USER_CONFIRM_REPLY);
9937}
9938
9939int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9940 u8 link_type, u8 addr_type, u8 status)
9941{
9942 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9943 status,
9944 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9945}
9946
9947int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9948 u8 link_type, u8 addr_type, u8 status)
9949{
9950 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9951 status, MGMT_OP_USER_PASSKEY_REPLY);
9952}
9953
9954int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9955 u8 link_type, u8 addr_type, u8 status)
9956{
9957 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9958 status,
9959 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9960}
9961
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command issued as part of powering on
		 * the HCI dev, don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

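/* Check whether any UUID from the filter list occurs in the given EIR
 * data. EIR/advertising data is a sequence of length-prefixed fields:
 * one length octet (covering the AD type octet plus the data), one AD
 * type octet, then the data itself. 16-bit and 32-bit UUIDs are
 * expanded to 128-bit form by overwriting the corresponding bytes
 * (12-13 for 16-bit, 12-15 for 32-bit values, little endian) of the
 * Bluetooth base UUID before comparison.
 */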
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

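/* Decide whether a discovery result passes the Start Service Discovery
 * filters (RSSI threshold and UUID list) configured for this hdev.
 * Returns true if the result should be reported to userspace.
 */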
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results with
		 * no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated results with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate the RSSI value against the threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

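/* Emit an ADV_MONITOR_DEVICE_FOUND event for a matched monitor. The
 * resulting event payload is, in order:
 *
 *   __le16 monitor_handle;            handle of the matched monitor
 *   struct mgmt_ev_device_found ev;   verbatim copy of the DEVICE_FOUND
 *                                     event, including its EIR data
 */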
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is identical to the DEVICE_FOUND event
	 * except that it is prefixed with 'monitor_handle'. Make a copy of
	 * DEVICE_FOUND and store the monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not actively scanning and all
	 * advertisements received are due to a matched Advertisement Monitor,
	 * report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not actively scanning and
		 * this is either a subsequent advertisement report for an
		 * already matched Advertisement Monitor or the controller
		 * offloading support is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

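/* Report an LE advertisement to mesh-interested listeners.
 * Advertisements are filtered against the AD types configured in
 * hdev->mesh_ad_types (a zero-terminated list): if the first entry is
 * zero, everything is accepted; otherwise the report is dropped unless
 * the advertising data or scan response contains at least one of the
 * requested AD types.
 */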
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

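/* Main entry point for discovery results. A result is forwarded to mesh
 * listeners first (for LE links when HCI_MESH is set), then dropped
 * unless kernel-initiated discovery, passive scanning with pending
 * reports, or an active Advertisement Monitor justifies reporting it.
 * The service discovery and limited discovery filters are applied
 * before the DEVICE_FOUND event is built and dispatched.
 */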
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. With
	 * LE, one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for the limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate the skb. The 5 extra bytes are for the potential
	 * Class of Device field appended below (1 length octet, 1 AD type
	 * octet, 3 data octets).
	 */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() may fail; don't dereference a NULL skb */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

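/* Called when a management socket is closed: abort any mesh
 * transmissions still owned by that socket on every controller, so
 * queued mesh packets do not outlive their sender.
 */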
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2010 Nokia Corporation
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23/* Bluetooth HCI Management interface */
24
25#include <linux/uaccess.h>
26#include <asm/unaligned.h>
27
28#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/hci_core.h>
30#include <net/bluetooth/mgmt.h>
31
32#define MGMT_VERSION 0
33#define MGMT_REVISION 1
34
35struct pending_cmd {
36 struct list_head list;
37 __u16 opcode;
38 int index;
39 void *param;
40 struct sock *sk;
41 void *user_data;
42};
43
44static LIST_HEAD(cmd_list);
45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
47{
48 struct sk_buff *skb;
49 struct mgmt_hdr *hdr;
50 struct mgmt_ev_cmd_status *ev;
51
52 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
53
54 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
55 if (!skb)
56 return -ENOMEM;
57
58 hdr = (void *) skb_put(skb, sizeof(*hdr));
59
60 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
61 hdr->index = cpu_to_le16(index);
62 hdr->len = cpu_to_le16(sizeof(*ev));
63
64 ev = (void *) skb_put(skb, sizeof(*ev));
65 ev->status = status;
66 put_unaligned_le16(cmd, &ev->opcode);
67
68 if (sock_queue_rcv_skb(sk, skb) < 0)
69 kfree_skb(skb);
70
71 return 0;
72}
73
74static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
75 size_t rp_len)
76{
77 struct sk_buff *skb;
78 struct mgmt_hdr *hdr;
79 struct mgmt_ev_cmd_complete *ev;
80
81 BT_DBG("sock %p", sk);
82
83 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
84 if (!skb)
85 return -ENOMEM;
86
87 hdr = (void *) skb_put(skb, sizeof(*hdr));
88
89 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
90 hdr->index = cpu_to_le16(index);
91 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
92
93 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
94 put_unaligned_le16(cmd, &ev->opcode);
95
96 if (rp)
97 memcpy(ev->data, rp, rp_len);
98
99 if (sock_queue_rcv_skb(sk, skb) < 0)
100 kfree_skb(skb);
101
102 return 0;
103}
104
105static int read_version(struct sock *sk)
106{
107 struct mgmt_rp_read_version rp;
108
109 BT_DBG("sock %p", sk);
110
111 rp.version = MGMT_VERSION;
112 put_unaligned_le16(MGMT_REVISION, &rp.revision);
113
114 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
115 sizeof(rp));
116}
117
118static int read_index_list(struct sock *sk)
119{
120 struct mgmt_rp_read_index_list *rp;
121 struct list_head *p;
122 size_t rp_len;
123 u16 count;
124 int i, err;
125
126 BT_DBG("sock %p", sk);
127
128 read_lock(&hci_dev_list_lock);
129
130 count = 0;
131 list_for_each(p, &hci_dev_list) {
132 count++;
133 }
134
135 rp_len = sizeof(*rp) + (2 * count);
136 rp = kmalloc(rp_len, GFP_ATOMIC);
137 if (!rp) {
138 read_unlock(&hci_dev_list_lock);
139 return -ENOMEM;
140 }
141
142 put_unaligned_le16(count, &rp->num_controllers);
143
144 i = 0;
145 list_for_each(p, &hci_dev_list) {
146 struct hci_dev *d = list_entry(p, struct hci_dev, list);
147
148 hci_del_off_timer(d);
149
150 set_bit(HCI_MGMT, &d->flags);
151
152 if (test_bit(HCI_SETUP, &d->flags))
153 continue;
154
155 put_unaligned_le16(d->id, &rp->index[i++]);
156 BT_DBG("Added hci%u", d->id);
157 }
158
159 read_unlock(&hci_dev_list_lock);
160
161 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
162 rp_len);
163
164 kfree(rp);
165
166 return err;
167}
168
169static int read_controller_info(struct sock *sk, u16 index)
170{
171 struct mgmt_rp_read_info rp;
172 struct hci_dev *hdev;
173
174 BT_DBG("sock %p hci%u", sk, index);
175
176 hdev = hci_dev_get(index);
177 if (!hdev)
178 return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
179
180 hci_del_off_timer(hdev);
181
182 hci_dev_lock_bh(hdev);
183
184 set_bit(HCI_MGMT, &hdev->flags);
185
186 memset(&rp, 0, sizeof(rp));
187
188 rp.type = hdev->dev_type;
189
190 rp.powered = test_bit(HCI_UP, &hdev->flags);
191 rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
192 rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
193 rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
194
195 if (test_bit(HCI_AUTH, &hdev->flags))
196 rp.sec_mode = 3;
197 else if (hdev->ssp_mode > 0)
198 rp.sec_mode = 4;
199 else
200 rp.sec_mode = 2;
201
202 bacpy(&rp.bdaddr, &hdev->bdaddr);
203 memcpy(rp.features, hdev->features, 8);
204 memcpy(rp.dev_class, hdev->dev_class, 3);
205 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
206 rp.hci_ver = hdev->hci_ver;
207 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
208
209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
210
211 hci_dev_unlock_bh(hdev);
212 hci_dev_put(hdev);
213
214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
215}
216
217static void mgmt_pending_free(struct pending_cmd *cmd)
218{
219 sock_put(cmd->sk);
220 kfree(cmd->param);
221 kfree(cmd);
222}
223
224static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
225 u16 index, void *data, u16 len)
226{
227 struct pending_cmd *cmd;
228
229 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
230 if (!cmd)
231 return NULL;
232
233 cmd->opcode = opcode;
234 cmd->index = index;
235
236 cmd->param = kmalloc(len, GFP_ATOMIC);
237 if (!cmd->param) {
238 kfree(cmd);
239 return NULL;
240 }
241
242 if (data)
243 memcpy(cmd->param, data, len);
244
245 cmd->sk = sk;
246 sock_hold(sk);
247
248 list_add(&cmd->list, &cmd_list);
249
250 return cmd;
251}
252
253static void mgmt_pending_foreach(u16 opcode, int index,
254 void (*cb)(struct pending_cmd *cmd, void *data),
255 void *data)
256{
257 struct list_head *p, *n;
258
259 list_for_each_safe(p, n, &cmd_list) {
260 struct pending_cmd *cmd;
261
262 cmd = list_entry(p, struct pending_cmd, list);
263
264 if (cmd->opcode != opcode)
265 continue;
266
267 if (index >= 0 && cmd->index != index)
268 continue;
269
270 cb(cmd, data);
271 }
272}
273
274static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
275{
276 struct list_head *p;
277
278 list_for_each(p, &cmd_list) {
279 struct pending_cmd *cmd;
280
281 cmd = list_entry(p, struct pending_cmd, list);
282
283 if (cmd->opcode != opcode)
284 continue;
285
286 if (index >= 0 && cmd->index != index)
287 continue;
288
289 return cmd;
290 }
291
292 return NULL;
293}
294
295static void mgmt_pending_remove(struct pending_cmd *cmd)
296{
297 list_del(&cmd->list);
298 mgmt_pending_free(cmd);
299}
300
301static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
302{
303 struct mgmt_mode *cp;
304 struct hci_dev *hdev;
305 struct pending_cmd *cmd;
306 int err, up;
307
308 cp = (void *) data;
309
310 BT_DBG("request for hci%u", index);
311
312 if (len != sizeof(*cp))
313 return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
314
315 hdev = hci_dev_get(index);
316 if (!hdev)
317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
318
319 hci_dev_lock_bh(hdev);
320
321 up = test_bit(HCI_UP, &hdev->flags);
322 if ((cp->val && up) || (!cp->val && !up)) {
323 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
324 goto failed;
325 }
326
327 if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
328 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
329 goto failed;
330 }
331
332 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
333 if (!cmd) {
334 err = -ENOMEM;
335 goto failed;
336 }
337
338 if (cp->val)
339 queue_work(hdev->workqueue, &hdev->power_on);
340 else
341 queue_work(hdev->workqueue, &hdev->power_off);
342
343 err = 0;
344
345failed:
346 hci_dev_unlock_bh(hdev);
347 hci_dev_put(hdev);
348 return err;
349}
350
351static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
352 u16 len)
353{
354 struct mgmt_mode *cp;
355 struct hci_dev *hdev;
356 struct pending_cmd *cmd;
357 u8 scan;
358 int err;
359
360 cp = (void *) data;
361
362 BT_DBG("request for hci%u", index);
363
364 if (len != sizeof(*cp))
365 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
366
367 hdev = hci_dev_get(index);
368 if (!hdev)
369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
370
371 hci_dev_lock_bh(hdev);
372
373 if (!test_bit(HCI_UP, &hdev->flags)) {
374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
375 goto failed;
376 }
377
378 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
379 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
380 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
381 goto failed;
382 }
383
384 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
385 test_bit(HCI_PSCAN, &hdev->flags)) {
386 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
387 goto failed;
388 }
389
390 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
391 if (!cmd) {
392 err = -ENOMEM;
393 goto failed;
394 }
395
396 scan = SCAN_PAGE;
397
398 if (cp->val)
399 scan |= SCAN_INQUIRY;
400
401 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
402 if (err < 0)
403 mgmt_pending_remove(cmd);
404
405failed:
406 hci_dev_unlock_bh(hdev);
407 hci_dev_put(hdev);
408
409 return err;
410}
411
412static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
413 u16 len)
414{
415 struct mgmt_mode *cp;
416 struct hci_dev *hdev;
417 struct pending_cmd *cmd;
418 u8 scan;
419 int err;
420
421 cp = (void *) data;
422
423 BT_DBG("request for hci%u", index);
424
425 if (len != sizeof(*cp))
426 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
427
428 hdev = hci_dev_get(index);
429 if (!hdev)
430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
431
432 hci_dev_lock_bh(hdev);
433
434 if (!test_bit(HCI_UP, &hdev->flags)) {
435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
436 goto failed;
437 }
438
439 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
440 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
441 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
442 goto failed;
443 }
444
445 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
446 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
447 goto failed;
448 }
449
450 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
451 if (!cmd) {
452 err = -ENOMEM;
453 goto failed;
454 }
455
456 if (cp->val)
457 scan = SCAN_PAGE;
458 else
459 scan = 0;
460
461 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
462 if (err < 0)
463 mgmt_pending_remove(cmd);
464
465failed:
466 hci_dev_unlock_bh(hdev);
467 hci_dev_put(hdev);
468
469 return err;
470}
471
472static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
473 struct sock *skip_sk)
474{
475 struct sk_buff *skb;
476 struct mgmt_hdr *hdr;
477
478 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
479 if (!skb)
480 return -ENOMEM;
481
482 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
483
484 hdr = (void *) skb_put(skb, sizeof(*hdr));
485 hdr->opcode = cpu_to_le16(event);
486 hdr->index = cpu_to_le16(index);
487 hdr->len = cpu_to_le16(data_len);
488
489 if (data)
490 memcpy(skb_put(skb, data_len), data, data_len);
491
492 hci_send_to_sock(NULL, skb, skip_sk);
493 kfree_skb(skb);
494
495 return 0;
496}
497
498static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
499{
500 struct mgmt_mode rp;
501
502 rp.val = val;
503
504 return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
505}
506
507static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
508 u16 len)
509{
510 struct mgmt_mode *cp, ev;
511 struct hci_dev *hdev;
512 int err;
513
514 cp = (void *) data;
515
516 BT_DBG("request for hci%u", index);
517
518 if (len != sizeof(*cp))
519 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
520
521 hdev = hci_dev_get(index);
522 if (!hdev)
523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
524
525 hci_dev_lock_bh(hdev);
526
527 if (cp->val)
528 set_bit(HCI_PAIRABLE, &hdev->flags);
529 else
530 clear_bit(HCI_PAIRABLE, &hdev->flags);
531
532 err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
533 if (err < 0)
534 goto failed;
535
536 ev.val = cp->val;
537
538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
539
540failed:
541 hci_dev_unlock_bh(hdev);
542 hci_dev_put(hdev);
543
544 return err;
545}
546
547#define EIR_FLAGS 0x01 /* flags */
548#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
549#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
550#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
551#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
552#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
553#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
554#define EIR_NAME_SHORT 0x08 /* shortened local name */
555#define EIR_NAME_COMPLETE 0x09 /* complete local name */
556#define EIR_TX_POWER 0x0A /* transmit power level */
557#define EIR_DEVICE_ID 0x10 /* device ID */
558
559#define PNP_INFO_SVCLASS_ID 0x1200
560
561static u8 bluetooth_base_uuid[] = {
562 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
563 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
564};
565
566static u16 get_uuid16(u8 *uuid128)
567{
568 u32 val;
569 int i;
570
571 for (i = 0; i < 12; i++) {
572 if (bluetooth_base_uuid[i] != uuid128[i])
573 return 0;
574 }
575
576 memcpy(&val, &uuid128[12], 4);
577
578 val = le32_to_cpu(val);
579 if (val > 0xffff)
580 return 0;
581
582 return (u16) val;
583}
584
585static void create_eir(struct hci_dev *hdev, u8 *data)
586{
587 u8 *ptr = data;
588 u16 eir_len = 0;
589 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
590 int i, truncated = 0;
591 struct list_head *p;
592 size_t name_len;
593
594 name_len = strlen(hdev->dev_name);
595
596 if (name_len > 0) {
597 /* EIR Data type */
598 if (name_len > 48) {
599 name_len = 48;
600 ptr[1] = EIR_NAME_SHORT;
601 } else
602 ptr[1] = EIR_NAME_COMPLETE;
603
604 /* EIR Data length */
605 ptr[0] = name_len + 1;
606
607 memcpy(ptr + 2, hdev->dev_name, name_len);
608
609 eir_len += (name_len + 2);
610 ptr += (name_len + 2);
611 }
612
613 memset(uuid16_list, 0, sizeof(uuid16_list));
614
615 /* Group all UUID16 types */
616 list_for_each(p, &hdev->uuids) {
617 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
618 u16 uuid16;
619
620 uuid16 = get_uuid16(uuid->uuid);
621 if (uuid16 == 0)
622 return;
623
624 if (uuid16 < 0x1100)
625 continue;
626
627 if (uuid16 == PNP_INFO_SVCLASS_ID)
628 continue;
629
630 /* Stop if not enough space to put next UUID */
631 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
632 truncated = 1;
633 break;
634 }
635
636 /* Check for duplicates */
637 for (i = 0; uuid16_list[i] != 0; i++)
638 if (uuid16_list[i] == uuid16)
639 break;
640
641 if (uuid16_list[i] == 0) {
642 uuid16_list[i] = uuid16;
643 eir_len += sizeof(u16);
644 }
645 }
646
647 if (uuid16_list[0] != 0) {
648 u8 *length = ptr;
649
650 /* EIR Data type */
651 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
652
653 ptr += 2;
654 eir_len += 2;
655
656 for (i = 0; uuid16_list[i] != 0; i++) {
657 *ptr++ = (uuid16_list[i] & 0x00ff);
658 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
659 }
660
661 /* EIR Data length */
662 *length = (i * sizeof(u16)) + 1;
663 }
664}
665
666static int update_eir(struct hci_dev *hdev)
667{
668 struct hci_cp_write_eir cp;
669
670 if (!(hdev->features[6] & LMP_EXT_INQ))
671 return 0;
672
673 if (hdev->ssp_mode == 0)
674 return 0;
675
676 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
677 return 0;
678
679 memset(&cp, 0, sizeof(cp));
680
681 create_eir(hdev, cp.data);
682
683 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
684 return 0;
685
686 memcpy(hdev->eir, cp.data, sizeof(cp.data));
687
688 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
689}
690
691static u8 get_service_classes(struct hci_dev *hdev)
692{
693 struct list_head *p;
694 u8 val = 0;
695
696 list_for_each(p, &hdev->uuids) {
697 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
698
699 val |= uuid->svc_hint;
700 }
701
702 return val;
703}
704
705static int update_class(struct hci_dev *hdev)
706{
707 u8 cod[3];
708
709 BT_DBG("%s", hdev->name);
710
711 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
712 return 0;
713
714 cod[0] = hdev->minor_class;
715 cod[1] = hdev->major_class;
716 cod[2] = get_service_classes(hdev);
717
718 if (memcmp(cod, hdev->dev_class, 3) == 0)
719 return 0;
720
721 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
722}
723
724static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
725{
726 struct mgmt_cp_add_uuid *cp;
727 struct hci_dev *hdev;
728 struct bt_uuid *uuid;
729 int err;
730
731 cp = (void *) data;
732
733 BT_DBG("request for hci%u", index);
734
735 if (len != sizeof(*cp))
736 return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
737
738 hdev = hci_dev_get(index);
739 if (!hdev)
740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
741
742 hci_dev_lock_bh(hdev);
743
744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
745 if (!uuid) {
746 err = -ENOMEM;
747 goto failed;
748 }
749
750 memcpy(uuid->uuid, cp->uuid, 16);
751 uuid->svc_hint = cp->svc_hint;
752
753 list_add(&uuid->list, &hdev->uuids);
754
755 err = update_class(hdev);
756 if (err < 0)
757 goto failed;
758
759 err = update_eir(hdev);
760 if (err < 0)
761 goto failed;
762
763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
764
765failed:
766 hci_dev_unlock_bh(hdev);
767 hci_dev_put(hdev);
768
769 return err;
770}
771
772static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
773{
774 struct list_head *p, *n;
775 struct mgmt_cp_remove_uuid *cp;
776 struct hci_dev *hdev;
777 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
778 int err, found;
779
780 cp = (void *) data;
781
782 BT_DBG("request for hci%u", index);
783
784 if (len != sizeof(*cp))
785 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
786
787 hdev = hci_dev_get(index);
788 if (!hdev)
789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
790
791 hci_dev_lock_bh(hdev);
792
793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
794 err = hci_uuids_clear(hdev);
795 goto unlock;
796 }
797
798 found = 0;
799
800 list_for_each_safe(p, n, &hdev->uuids) {
801 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
802
803 if (memcmp(match->uuid, cp->uuid, 16) != 0)
804 continue;
805
806 list_del(&match->list);
807 found++;
808 }
809
810 if (found == 0) {
811 err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
812 goto unlock;
813 }
814
815 err = update_class(hdev);
816 if (err < 0)
817 goto unlock;
818
819 err = update_eir(hdev);
820 if (err < 0)
821 goto unlock;
822
823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
824
825unlock:
826 hci_dev_unlock_bh(hdev);
827 hci_dev_put(hdev);
828
829 return err;
830}
831
832static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
833 u16 len)
834{
835 struct hci_dev *hdev;
836 struct mgmt_cp_set_dev_class *cp;
837 int err;
838
839 cp = (void *) data;
840
841 BT_DBG("request for hci%u", index);
842
843 if (len != sizeof(*cp))
844 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
845
846 hdev = hci_dev_get(index);
847 if (!hdev)
848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
849
850 hci_dev_lock_bh(hdev);
851
852 hdev->major_class = cp->major;
853 hdev->minor_class = cp->minor;
854
855 err = update_class(hdev);
856
857 if (err == 0)
858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
859
860 hci_dev_unlock_bh(hdev);
861 hci_dev_put(hdev);
862
863 return err;
864}
865
866static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
867 u16 len)
868{
869 struct hci_dev *hdev;
870 struct mgmt_cp_set_service_cache *cp;
871 int err;
872
873 cp = (void *) data;
874
875 if (len != sizeof(*cp))
876 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
877
878 hdev = hci_dev_get(index);
879 if (!hdev)
880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
881
882 hci_dev_lock_bh(hdev);
883
884 BT_DBG("hci%u enable %d", index, cp->enable);
885
886 if (cp->enable) {
887 set_bit(HCI_SERVICE_CACHE, &hdev->flags);
888 err = 0;
889 } else {
890 clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
891 err = update_class(hdev);
892 if (err == 0)
893 err = update_eir(hdev);
894 }
895
896 if (err == 0)
897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
898 0);
899
900 hci_dev_unlock_bh(hdev);
901 hci_dev_put(hdev);
902
903 return err;
904}
905
906static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
907{
908 struct hci_dev *hdev;
909 struct mgmt_cp_load_keys *cp;
910 u16 key_count, expected_len;
911 int i, err;
912
913 cp = (void *) data;
914
915 if (len < sizeof(*cp))
916 return -EINVAL;
917
918 key_count = get_unaligned_le16(&cp->key_count);
919
920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
921 if (expected_len > len) {
922 BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
923 expected_len, len);
924 return -EINVAL;
925 }
926
927 hdev = hci_dev_get(index);
928 if (!hdev)
929 return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
930
931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
932 key_count);
933
934 hci_dev_lock_bh(hdev);
935
936 hci_link_keys_clear(hdev);
937
938 set_bit(HCI_LINK_KEYS, &hdev->flags);
939
940 if (cp->debug_keys)
941 set_bit(HCI_DEBUG_KEYS, &hdev->flags);
942 else
943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
944
945 len -= sizeof(*cp);
946 i = 0;
947
948 while (i < len) {
949 struct mgmt_key_info *key = (void *) cp->keys + i;
950
951 i += sizeof(*key) + key->dlen;
952
953 if (key->type == HCI_LK_SMP_LTK) {
954 struct key_master_id *id = (void *) key->data;
955
956 if (key->dlen != sizeof(struct key_master_id))
957 continue;
958
959 hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
960 id->ediv, id->rand, key->val);
961
962 continue;
963 }
964
965 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
966 key->pin_len);
967 }
968
969 err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
970
971 hci_dev_unlock_bh(hdev);
972 hci_dev_put(hdev);
973
974 return err;
975}
976
977static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
978{
979 struct hci_dev *hdev;
980 struct mgmt_cp_remove_key *cp;
981 struct hci_conn *conn;
982 int err;
983
984 cp = (void *) data;
985
986 if (len != sizeof(*cp))
987 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
988
989 hdev = hci_dev_get(index);
990 if (!hdev)
991 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
992
993 hci_dev_lock_bh(hdev);
994
995 err = hci_remove_link_key(hdev, &cp->bdaddr);
996 if (err < 0) {
997 err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
998 goto unlock;
999 }
1000
1001 err = 0;
1002
1003 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
1004 goto unlock;
1005
1006 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1007 if (conn) {
1008 struct hci_cp_disconnect dc;
1009
1010 put_unaligned_le16(conn->handle, &dc.handle);
1011 dc.reason = 0x13; /* Remote User Terminated Connection */
1012 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1013 }
1014
1015unlock:
1016 hci_dev_unlock_bh(hdev);
1017 hci_dev_put(hdev);
1018
1019 return err;
1020}
1021
1022static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1023{
1024 struct hci_dev *hdev;
1025 struct mgmt_cp_disconnect *cp;
1026 struct hci_cp_disconnect dc;
1027 struct pending_cmd *cmd;
1028 struct hci_conn *conn;
1029 int err;
1030
1031 BT_DBG("");
1032
1033 cp = (void *) data;
1034
1035 if (len != sizeof(*cp))
1036 return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
1037
1038 hdev = hci_dev_get(index);
1039 if (!hdev)
1040 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
1041
1042 hci_dev_lock_bh(hdev);
1043
1044 if (!test_bit(HCI_UP, &hdev->flags)) {
1045 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
1046 goto failed;
1047 }
1048
1049 if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
1050 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
1051 goto failed;
1052 }
1053
1054 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1055 if (!conn)
1056 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
1057
1058 if (!conn) {
1059 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
1060 goto failed;
1061 }
1062
1063 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
1064 if (!cmd) {
1065 err = -ENOMEM;
1066 goto failed;
1067 }
1068
1069 put_unaligned_le16(conn->handle, &dc.handle);
1070 dc.reason = 0x13; /* Remote User Terminated Connection */
1071
1072 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1073 if (err < 0)
1074 mgmt_pending_remove(cmd);
1075
1076failed:
1077 hci_dev_unlock_bh(hdev);
1078 hci_dev_put(hdev);
1079
1080 return err;
1081}
1082
1083static int get_connections(struct sock *sk, u16 index)
1084{
1085 struct mgmt_rp_get_connections *rp;
1086 struct hci_dev *hdev;
1087 struct list_head *p;
1088 size_t rp_len;
1089 u16 count;
1090 int i, err;
1091
1092 BT_DBG("");
1093
1094 hdev = hci_dev_get(index);
1095 if (!hdev)
1096 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
1097
1098 hci_dev_lock_bh(hdev);
1099
1100 count = 0;
1101 list_for_each(p, &hdev->conn_hash.list) {
1102 count++;
1103 }
1104
1105 rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
1106 rp = kmalloc(rp_len, GFP_ATOMIC);
1107 if (!rp) {
1108 err = -ENOMEM;
1109 goto unlock;
1110 }
1111
1112 put_unaligned_le16(count, &rp->conn_count);
1113
1114 i = 0;
1115 list_for_each(p, &hdev->conn_hash.list) {
1116 struct hci_conn *c = list_entry(p, struct hci_conn, list);
1117
1118 bacpy(&rp->conn[i++], &c->dst);
1119 }
1120
1121 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
1122
1123unlock:
1124 kfree(rp);
1125 hci_dev_unlock_bh(hdev);
1126 hci_dev_put(hdev);
1127 return err;
1128}
1129
1130static int send_pin_code_neg_reply(struct sock *sk, u16 index,
1131 struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
1132{
1133 struct pending_cmd *cmd;
1134 int err;
1135
1136 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
1137 sizeof(*cp));
1138 if (!cmd)
1139 return -ENOMEM;
1140
1141 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1142 &cp->bdaddr);
1143 if (err < 0)
1144 mgmt_pending_remove(cmd);
1145
1146 return err;
1147}
1148
1149static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1150 u16 len)
1151{
1152 struct hci_dev *hdev;
1153 struct hci_conn *conn;
1154 struct mgmt_cp_pin_code_reply *cp;
1155 struct mgmt_cp_pin_code_neg_reply ncp;
1156 struct hci_cp_pin_code_reply reply;
1157 struct pending_cmd *cmd;
1158 int err;
1159
1160 BT_DBG("");
1161
1162 cp = (void *) data;
1163
1164 if (len != sizeof(*cp))
1165 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
1166
1167 hdev = hci_dev_get(index);
1168 if (!hdev)
1169 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
1170
1171 hci_dev_lock_bh(hdev);
1172
1173 if (!test_bit(HCI_UP, &hdev->flags)) {
1174 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
1175 goto failed;
1176 }
1177
1178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1179 if (!conn) {
1180 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
1181 goto failed;
1182 }
1183
1184 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1185 bacpy(&ncp.bdaddr, &cp->bdaddr);
1186
1187 BT_ERR("PIN code is not 16 bytes long");
1188
1189 err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
1190 if (err >= 0)
1191 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1192 EINVAL);
1193
1194 goto failed;
1195 }
1196
1197 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
1198 if (!cmd) {
1199 err = -ENOMEM;
1200 goto failed;
1201 }
1202
1203 bacpy(&reply.bdaddr, &cp->bdaddr);
1204 reply.pin_len = cp->pin_len;
1205 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1206
1207 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
1208 if (err < 0)
1209 mgmt_pending_remove(cmd);
1210
1211failed:
1212 hci_dev_unlock_bh(hdev);
1213 hci_dev_put(hdev);
1214
1215 return err;
1216}
1217
1218static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1219 u16 len)
1220{
1221 struct hci_dev *hdev;
1222 struct mgmt_cp_pin_code_neg_reply *cp;
1223 int err;
1224
1225 BT_DBG("");
1226
1227 cp = (void *) data;
1228
1229 if (len != sizeof(*cp))
1230 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1231 EINVAL);
1232
1233 hdev = hci_dev_get(index);
1234 if (!hdev)
1235 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1236 ENODEV);
1237
1238 hci_dev_lock_bh(hdev);
1239
1240 if (!test_bit(HCI_UP, &hdev->flags)) {
1241 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1242 ENETDOWN);
1243 goto failed;
1244 }
1245
1246 err = send_pin_code_neg_reply(sk, index, hdev, cp);
1247
1248failed:
1249 hci_dev_unlock_bh(hdev);
1250 hci_dev_put(hdev);
1251
1252 return err;
1253}
1254
1255static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1256 u16 len)
1257{
1258 struct hci_dev *hdev;
1259 struct mgmt_cp_set_io_capability *cp;
1260
1261 BT_DBG("");
1262
1263 cp = (void *) data;
1264
1265 if (len != sizeof(*cp))
1266 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
1267
1268 hdev = hci_dev_get(index);
1269 if (!hdev)
1270 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1271
1272 hci_dev_lock_bh(hdev);
1273
1274 hdev->io_capability = cp->io_capability;
1275
1276 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1277 hdev->io_capability);
1278
1279 hci_dev_unlock_bh(hdev);
1280 hci_dev_put(hdev);
1281
1282 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
1283}
1284
1285static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
1286{
1287 struct hci_dev *hdev = conn->hdev;
1288 struct list_head *p;
1289
1290 list_for_each(p, &cmd_list) {
1291 struct pending_cmd *cmd;
1292
1293 cmd = list_entry(p, struct pending_cmd, list);
1294
1295 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1296 continue;
1297
1298 if (cmd->index != hdev->id)
1299 continue;
1300
1301 if (cmd->user_data != conn)
1302 continue;
1303
1304 return cmd;
1305 }
1306
1307 return NULL;
1308}
1309
1310static void pairing_complete(struct pending_cmd *cmd, u8 status)
1311{
1312 struct mgmt_rp_pair_device rp;
1313 struct hci_conn *conn = cmd->user_data;
1314
1315 bacpy(&rp.bdaddr, &conn->dst);
1316 rp.status = status;
1317
1318 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
1319
1320 /* So we don't get further callbacks for this connection */
1321 conn->connect_cfm_cb = NULL;
1322 conn->security_cfm_cb = NULL;
1323 conn->disconn_cfm_cb = NULL;
1324
1325 hci_conn_put(conn);
1326
1327 mgmt_pending_remove(cmd);
1328}
1329
1330static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1331{
1332 struct pending_cmd *cmd;
1333
1334 BT_DBG("status %u", status);
1335
1336 cmd = find_pairing(conn);
1337 if (!cmd) {
1338 BT_DBG("Unable to find a pending command");
1339 return;
1340 }
1341
1342 pairing_complete(cmd, status);
1343}
1344
1345static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1346{
1347 struct hci_dev *hdev;
1348 struct mgmt_cp_pair_device *cp;
1349 struct pending_cmd *cmd;
1350 u8 sec_level, auth_type;
1351 struct hci_conn *conn;
1352 int err;
1353
1354 BT_DBG("");
1355
1356 cp = (void *) data;
1357
1358 if (len != sizeof(*cp))
1359 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
1360
1361 hdev = hci_dev_get(index);
1362 if (!hdev)
1363 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
1364
1365 hci_dev_lock_bh(hdev);
1366
1367 if (cp->io_cap == 0x03) {
1368 sec_level = BT_SECURITY_MEDIUM;
1369 auth_type = HCI_AT_DEDICATED_BONDING;
1370 } else {
1371 sec_level = BT_SECURITY_HIGH;
1372 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1373 }
1374
1375 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
1376 if (IS_ERR(conn)) {
1377 err = PTR_ERR(conn);
1378 goto unlock;
1379 }
1380
1381 if (conn->connect_cfm_cb) {
1382 hci_conn_put(conn);
1383 err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
1384 goto unlock;
1385 }
1386
1387 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
1388 if (!cmd) {
1389 err = -ENOMEM;
1390 hci_conn_put(conn);
1391 goto unlock;
1392 }
1393
1394 conn->connect_cfm_cb = pairing_complete_cb;
1395 conn->security_cfm_cb = pairing_complete_cb;
1396 conn->disconn_cfm_cb = pairing_complete_cb;
1397 conn->io_capability = cp->io_cap;
1398 cmd->user_data = conn;
1399
1400 if (conn->state == BT_CONNECTED &&
1401 hci_conn_security(conn, sec_level, auth_type))
1402 pairing_complete(cmd, 0);
1403
1404 err = 0;
1405
1406unlock:
1407 hci_dev_unlock_bh(hdev);
1408 hci_dev_put(hdev);
1409
1410 return err;
1411}
1412
1413static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1414 u16 len, int success)
1415{
1416 struct mgmt_cp_user_confirm_reply *cp = (void *) data;
1417 u16 mgmt_op, hci_op;
1418 struct pending_cmd *cmd;
1419 struct hci_dev *hdev;
1420 int err;
1421
1422 BT_DBG("");
1423
1424 if (success) {
1425 mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
1426 hci_op = HCI_OP_USER_CONFIRM_REPLY;
1427 } else {
1428 mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
1429 hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
1430 }
1431
1432 if (len != sizeof(*cp))
1433 return cmd_status(sk, index, mgmt_op, EINVAL);
1434
1435 hdev = hci_dev_get(index);
1436 if (!hdev)
1437 return cmd_status(sk, index, mgmt_op, ENODEV);
1438
1439 hci_dev_lock_bh(hdev);
1440
1441 if (!test_bit(HCI_UP, &hdev->flags)) {
1442 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
1443 goto failed;
1444 }
1445
1446 cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
1447 if (!cmd) {
1448 err = -ENOMEM;
1449 goto failed;
1450 }
1451
1452 err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
1453 if (err < 0)
1454 mgmt_pending_remove(cmd);
1455
1456failed:
1457 hci_dev_unlock_bh(hdev);
1458 hci_dev_put(hdev);
1459
1460 return err;
1461}
1462
1463static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1464 u16 len)
1465{
1466 struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
1467 struct hci_cp_write_local_name hci_cp;
1468 struct hci_dev *hdev;
1469 struct pending_cmd *cmd;
1470 int err;
1471
1472 BT_DBG("");
1473
1474 if (len != sizeof(*mgmt_cp))
1475 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
1476
1477 hdev = hci_dev_get(index);
1478 if (!hdev)
1479 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
1480
1481 hci_dev_lock_bh(hdev);
1482
1483 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
1484 if (!cmd) {
1485 err = -ENOMEM;
1486 goto failed;
1487 }
1488
1489 memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
1490 err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
1491 &hci_cp);
1492 if (err < 0)
1493 mgmt_pending_remove(cmd);
1494
1495failed:
1496 hci_dev_unlock_bh(hdev);
1497 hci_dev_put(hdev);
1498
1499 return err;
1500}
1501
1502static int read_local_oob_data(struct sock *sk, u16 index)
1503{
1504 struct hci_dev *hdev;
1505 struct pending_cmd *cmd;
1506 int err;
1507
1508 BT_DBG("hci%u", index);
1509
1510 hdev = hci_dev_get(index);
1511 if (!hdev)
1512 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1513 ENODEV);
1514
1515 hci_dev_lock_bh(hdev);
1516
1517 if (!test_bit(HCI_UP, &hdev->flags)) {
1518 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1519 ENETDOWN);
1520 goto unlock;
1521 }
1522
1523 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
1524 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1525 EOPNOTSUPP);
1526 goto unlock;
1527 }
1528
1529 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
1530 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
1531 goto unlock;
1532 }
1533
1534 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
1535 if (!cmd) {
1536 err = -ENOMEM;
1537 goto unlock;
1538 }
1539
1540 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
1541 if (err < 0)
1542 mgmt_pending_remove(cmd);
1543
1544unlock:
1545 hci_dev_unlock_bh(hdev);
1546 hci_dev_put(hdev);
1547
1548 return err;
1549}
1550
1551static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1552 u16 len)
1553{
1554 struct hci_dev *hdev;
1555 struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
1556 int err;
1557
1558 BT_DBG("hci%u ", index);
1559
1560 if (len != sizeof(*cp))
1561 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1562 EINVAL);
1563
1564 hdev = hci_dev_get(index);
1565 if (!hdev)
1566 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1567 ENODEV);
1568
1569 hci_dev_lock_bh(hdev);
1570
1571 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
1572 cp->randomizer);
1573 if (err < 0)
1574 err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
1575 else
1576 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
1577 0);
1578
1579 hci_dev_unlock_bh(hdev);
1580 hci_dev_put(hdev);
1581
1582 return err;
1583}
1584
1585static int remove_remote_oob_data(struct sock *sk, u16 index,
1586 unsigned char *data, u16 len)
1587{
1588 struct hci_dev *hdev;
1589 struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
1590 int err;
1591
1592 BT_DBG("hci%u ", index);
1593
1594 if (len != sizeof(*cp))
1595 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1596 EINVAL);
1597
1598 hdev = hci_dev_get(index);
1599 if (!hdev)
1600 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1601 ENODEV);
1602
1603 hci_dev_lock_bh(hdev);
1604
1605 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
1606 if (err < 0)
1607 err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1608 -err);
1609 else
1610 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1611 NULL, 0);
1612
1613 hci_dev_unlock_bh(hdev);
1614 hci_dev_put(hdev);
1615
1616 return err;
1617}
1618
static int start_discovery(struct sock *sk, u16 index)
{
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	struct pending_cmd *cmd;
	struct hci_dev *hdev;
	int err;

	BT_DBG("hci%u", index);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);

	hci_dev_lock_bh(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, 3);
	cp.length = 0x08;
	cp.num_rsp = 0x00;

	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}

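/* Stop Discovery: cancel a running inquiry with HCI_OP_INQUIRY_CANCEL. */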
static int stop_discovery(struct sock *sk, u16 index)
{
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("hci%u", index);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);

	hci_dev_lock_bh(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}

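/*
 * Block/Unblock Device: add or remove a remote address on the
 * adapter's blacklist.  Both handlers reply synchronously; note that,
 * unlike the handlers above, they call the blacklist helpers without
 * taking hdev->lock here.
 */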
static int block_device(struct sock *sk, u16 index, unsigned char *data,
								u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_block_device *cp;
	int err;

	BT_DBG("hci%u", index);

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
								EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
								ENODEV);

	err = hci_blacklist_add(hdev, &cp->bdaddr);

	if (err < 0)
		err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
	else
		err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
								NULL, 0);
	hci_dev_put(hdev);

	return err;
}

static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
								u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_unblock_device *cp;
	int err;

	BT_DBG("hci%u", index);

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
								EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
								ENODEV);

	err = hci_blacklist_del(hdev, &cp->bdaddr);

	if (err < 0)
		err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
	else
		err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
								NULL, 0);
	hci_dev_put(hdev);

	return err;
}

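/*
 * mgmt_control() is the entry point for commands written to a
 * management socket.  Every command starts with a fixed header --
 * opcode, controller index and parameter length, each a little-endian
 * 16-bit value -- followed by len bytes of parameters, which is why
 * the fields are read with get_unaligned_le16() below.
 */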
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	unsigned char *buf;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = (struct mgmt_hdr *) buf;
	opcode = get_unaligned_le16(&hdr->opcode);
	index = get_unaligned_le16(&hdr->index);
	len = get_unaligned_le16(&hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	switch (opcode) {
	case MGMT_OP_READ_VERSION:
		err = read_version(sk);
		break;
	case MGMT_OP_READ_INDEX_LIST:
		err = read_index_list(sk);
		break;
	case MGMT_OP_READ_INFO:
		err = read_controller_info(sk, index);
		break;
	case MGMT_OP_SET_POWERED:
		err = set_powered(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_DISCOVERABLE:
		err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_CONNECTABLE:
		err = set_connectable(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_PAIRABLE:
		err = set_pairable(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_ADD_UUID:
		err = add_uuid(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_REMOVE_UUID:
		err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_DEV_CLASS:
		err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_SERVICE_CACHE:
		err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_LOAD_KEYS:
		err = load_keys(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_REMOVE_KEY:
		err = remove_key(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_DISCONNECT:
		err = disconnect(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_GET_CONNECTIONS:
		err = get_connections(sk, index);
		break;
	case MGMT_OP_PIN_CODE_REPLY:
		err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_PIN_CODE_NEG_REPLY:
		err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_IO_CAPABILITY:
		err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_PAIR_DEVICE:
		err = pair_device(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_USER_CONFIRM_REPLY:
		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
		break;
	case MGMT_OP_USER_CONFIRM_NEG_REPLY:
		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
		break;
	case MGMT_OP_SET_LOCAL_NAME:
		err = set_local_name(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_READ_LOCAL_OOB_DATA:
		err = read_local_oob_data(sk, index);
		break;
	case MGMT_OP_ADD_REMOTE_OOB_DATA:
		err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
		err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
									len);
		break;
	case MGMT_OP_START_DISCOVERY:
		err = start_discovery(sk, index);
		break;
	case MGMT_OP_STOP_DISCOVERY:
		err = stop_discovery(sk, index);
		break;
	case MGMT_OP_BLOCK_DEVICE:
		err = block_device(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_UNBLOCK_DEVICE:
		err = unblock_device(sk, index, buf + sizeof(*hdr), len);
		break;
	default:
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode, 0x01); /* unknown command */
		break;
	}

	if (err < 0)
		goto done;

	err = msglen;

done:
	kfree(buf);
	return err;
}

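/*
 * Event helpers: the mgmt_* functions below are called from the HCI
 * core and event handling code to package kernel state changes up as
 * management events for interested sockets.
 */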
int mgmt_index_added(u16 index)
{
	return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
}

int mgmt_index_removed(u16 index)
{
	return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
}

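/*
 * mode_rsp() runs for every pending SET_POWERED/SET_DISCOVERABLE/
 * SET_CONNECTABLE command whose requested value matches the mode that
 * just took effect.  It answers the command, remembers one of the
 * originating sockets in cmd_lookup so the broadcast event can be
 * skipped for it (that socket already got a command response), and
 * frees the pending entry.
 */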
struct cmd_lookup {
	u8 val;
	struct sock *sk;
};

static void mode_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_mode *cp = cmd->param;
	struct cmd_lookup *match = data;

	if (cp->val != match->val)
		return;

	send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

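/*
 * Mode change notifications: answer any pending SET_* commands first,
 * then broadcast the corresponding event to all other sockets.
 */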
int mgmt_powered(u16 index, u8 powered)
{
	struct mgmt_mode ev;
	struct cmd_lookup match = { powered, NULL };
	int ret;

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);

	ev.val = powered;

	ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);

	if (match.sk)
		sock_put(match.sk);

	return ret;
}

int mgmt_discoverable(u16 index, u8 discoverable)
{
	struct mgmt_mode ev;
	struct cmd_lookup match = { discoverable, NULL };
	int ret;

	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);

	ev.val = discoverable;

	ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
								match.sk);

	if (match.sk)
		sock_put(match.sk);

	return ret;
}

int mgmt_connectable(u16 index, u8 connectable)
{
	struct mgmt_mode ev;
	struct cmd_lookup match = { connectable, NULL };
	int ret;

	mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);

	ev.val = connectable;

	ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);

	if (match.sk)
		sock_put(match.sk);

	return ret;
}

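/*
 * New link key notification.  The event is variable length: key->dlen
 * bytes of key-type specific data follow the fixed part, and
 * store_hint tells userspace whether the key should be stored
 * persistently.
 */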
int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
{
	struct mgmt_ev_new_key *ev;
	int err, total;

	total = sizeof(struct mgmt_ev_new_key) + key->dlen;
	ev = kzalloc(total, GFP_ATOMIC);
	if (!ev)
		return -ENOMEM;

	bacpy(&ev->key.bdaddr, &key->bdaddr);
	ev->key.type = key->type;
	memcpy(ev->key.val, key->val, 16);
	ev->key.pin_len = key->pin_len;
	ev->key.dlen = key->dlen;
	ev->store_hint = persistent;

	memcpy(ev->key.data, key->data, key->dlen);

	err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);

	kfree(ev);

	return err;
}

int mgmt_connected(u16 index, bdaddr_t *bdaddr)
{
	struct mgmt_ev_connected ev;

	bacpy(&ev.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}

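/*
 * disconnect_rsp() completes a pending MGMT_OP_DISCONNECT and, like
 * mode_rsp() above, stashes the originating socket so that
 * mgmt_disconnected() can skip it when broadcasting the event.
 */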
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.bdaddr, &cp->bdaddr);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
{
	struct mgmt_ev_disconnected ev;
	struct sock *sk = NULL;
	int err;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);

	bacpy(&ev.bdaddr, bdaddr);

	err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	return err;
}

int mgmt_disconnect_failed(u16 index)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
	if (!cmd)
		return -ENOENT;

	err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.status = status;

	return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
}

int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.secure = secure;

	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
									NULL);
}

int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.bdaddr, bdaddr);
	rp.status = status;

	err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
								sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.bdaddr, bdaddr);
	rp.status = status;

	err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
								sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
							u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("hci%u", index);

	bacpy(&ev.bdaddr, bdaddr);
	ev.confirm_hint = confirm_hint;
	put_unaligned_le32(value, &ev.value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
									NULL);
}

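/*
 * confirm_reply_complete() is the shared tail for both the positive
 * and the negative user confirmation replies; only the opcode used
 * for the response differs.
 */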
static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
								u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, index);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.bdaddr, bdaddr);
	rp.status = status;
	err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	return confirm_reply_complete(index, bdaddr, status,
						MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	return confirm_reply_complete(index, bdaddr, status,
						MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.status = status;

	return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
}

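/*
 * mgmt_set_local_name_complete() handles both cases: completion of a
 * pending SET_LOCAL_NAME command (cmd found) and a name change that
 * originated elsewhere (cmd == NULL, event only).  On success the EIR
 * data is refreshed since it embeds the local name.
 */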
int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
{
	struct pending_cmd *cmd;
	struct hci_dev *hdev;
	struct mgmt_cp_set_local_name ev;
	int err;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
	if (!cmd)
		goto send_event;

	if (status) {
		err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
		goto failed;
	}

	hdev = hci_dev_get(index);
	if (hdev) {
		hci_dev_lock_bh(hdev);
		update_eir(hdev);
		hci_dev_unlock_bh(hdev);
		hci_dev_put(hdev);
	}

	err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
								sizeof(ev));
	if (err < 0)
		goto failed;

send_event:
	err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
							cmd ? cmd->sk : NULL);

failed:
	if (cmd)
		mgmt_pending_remove(cmd);
	return err;
}

int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
								u8 status)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("hci%u status %u", index, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
	if (!cmd)
		return -ENOENT;

	if (status) {
		err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
									EIO);
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
							&rp, sizeof(rp));
	}

	mgmt_pending_remove(cmd);

	return err;
}

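/*
 * Inquiry result handling: forward discovered devices, resolved remote
 * names and discovery state changes to userspace as events.
 */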
int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
								u8 *eir)
{
	struct mgmt_ev_device_found ev;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.bdaddr, bdaddr);
	memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
	ev.rssi = rssi;

	if (eir)
		memcpy(ev.eir, eir, sizeof(ev.eir));

	return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
}

int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
{
	struct mgmt_ev_remote_name ev;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.bdaddr, bdaddr);
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);

	return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
}

int mgmt_discovering(u16 index, u8 discovering)
{
	return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
						sizeof(discovering), NULL);
}