/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

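/* Each hci_cc_* function below handles the Command Complete event for one
 * specific HCI command; the command's return parameters start at skb->data,
 * beginning with a status byte.
 */
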
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

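/* HCI_Reset completed: drop all volatile state the host caches for the
 * controller, since the reset has invalidated it.
 */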
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

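/* Mirror the written scan enable setting into the HCI_ISCAN (inquiry scan)
 * and HCI_PSCAN (page scan) device flags.
 */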
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

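/* The reported packet counts seed hdev->acl_cnt and hdev->sco_cnt, which
 * the TX scheduler decrements to track how many packets the controller
 * can still accept.
 */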
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

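/* HCI_Read_Clock returns either the local clock (cp->which == 0x00) or
 * the piconet clock of the connection named in the sent command.
 */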
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

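/* These helpers manage the single advertising report buffered in the
 * discovery state during an active LE scan; a still-pending report is
 * flushed to mgmt when scanning gets disabled (see
 * hci_cc_le_set_scan_enable below).
 */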
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request; therefore
		 * mark discovery as stopped. If this was not because of a
		 * connect request, advertising might have been disabled
		 * because of active scanning, so re-enable it again if
		 * necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}

static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

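/* Keep the host-supported LE and simultaneous LE/BR-EDR bits of extended
 * features page 1 in sync with the value that was written.
 */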
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

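/* The hci_cs_* functions below handle Command Status events: the
 * controller acknowledges these commands with just a status code and the
 * actual result is delivered in a later event, so most handlers only act
 * on failure to unwind connection state set up when the command was sent.
 */
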
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

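/* Pick the next entry in the inquiry cache that still needs its name
 * resolved and issue a remote name request for it.
 */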
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving the next
	 * name, as that will be done upon receiving another Remote Name
	 * Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name request complete event before
	 * checking whether authentication is needed.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
				       cp->peer_addr_type);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}

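/* From here on the handlers process HCI events reported by the controller
 * on its own, rather than responses to host-initiated commands.
 */
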
2030static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
2031{
2032	__u8 status = *((__u8 *) skb->data);
2033	struct discovery_state *discov = &hdev->discovery;
2034	struct inquiry_entry *e;
2035
2036	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037
2038	hci_conn_check_pending(hdev);
2039
2040	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2041		return;
2042
2043	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2044	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2045
2046	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2047		return;
2048
2049	hci_dev_lock(hdev);
2050
2051	if (discov->state != DISCOVERY_FINDING)
2052		goto unlock;
2053
2054	if (list_empty(&discov->resolve)) {
2055		/* When BR/EDR inquiry is active and no LE scanning is in
2056		 * progress, then change discovery state to indicate completion.
2057		 *
2058		 * When running LE scanning and BR/EDR inquiry simultaneously
2059		 * and the LE scan already finished, then change the discovery
2060		 * state to indicate completion.
2061		 */
2062		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2063		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2064			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2065		goto unlock;
2066	}
2067
2068	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2069	if (e && hci_resolve_name(hdev, e) == 0) {
2070		e->name_state = NAME_PENDING;
2071		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2072	} else {
2073		/* When BR/EDR inquiry is active and no LE scanning is in
2074		 * progress, then change discovery state to indicate completion.
2075		 *
2076		 * When running LE scanning and BR/EDR inquiry simultaneously
2077		 * and the LE scan already finished, then change the discovery
2078		 * state to indicate completion.
2079		 */
2080		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2081		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2082			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2083	}
2084
2085unlock:
2086	hci_dev_unlock(hdev);
2087}
2088
2089static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2090{
2091	struct inquiry_data data;
2092	struct inquiry_info *info = (void *) (skb->data + 1);
2093	int num_rsp = *((__u8 *) skb->data);
2094
2095	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2096
2097	if (!num_rsp)
2098		return;
2099
2100	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2101		return;
2102
2103	hci_dev_lock(hdev);
2104
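	/* Standard Inquiry Result events carry neither an RSSI value
	 * nor EIR data, hence HCI_RSSI_INVALID and the empty EIR
	 * arguments below.
	 */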
2105	for (; num_rsp; num_rsp--, info++) {
2106		u32 flags;
2107
2108		bacpy(&data.bdaddr, &info->bdaddr);
2109		data.pscan_rep_mode	= info->pscan_rep_mode;
2110		data.pscan_period_mode	= info->pscan_period_mode;
2111		data.pscan_mode		= info->pscan_mode;
2112		memcpy(data.dev_class, info->dev_class, 3);
2113		data.clock_offset	= info->clock_offset;
2114		data.rssi		= HCI_RSSI_INVALID;
2115		data.ssp_mode		= 0x00;
2116
2117		flags = hci_inquiry_cache_update(hdev, &data, false);
2118
2119		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2120				  info->dev_class, HCI_RSSI_INVALID,
2121				  flags, NULL, 0, NULL, 0);
2122	}
2123
2124	hci_dev_unlock(hdev);
2125}
2126
2127static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2128{
2129	struct hci_ev_conn_complete *ev = (void *) skb->data;
2130	struct hci_conn *conn;
2131
2132	BT_DBG("%s", hdev->name);
2133
2134	hci_dev_lock(hdev);
2135
2136	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2137	if (!conn) {
2138		if (ev->link_type != SCO_LINK)
2139			goto unlock;
2140
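		/* Outgoing synchronous connections are attempted as
		 * eSCO first, so a reported SCO link may still be
		 * tracked by an eSCO connection object (see also the
		 * matching logic in hci_sync_conn_complete_evt).
		 */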
2141		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2142		if (!conn)
2143			goto unlock;
2144
2145		conn->type = SCO_LINK;
2146	}
2147
2148	if (!ev->status) {
2149		conn->handle = __le16_to_cpu(ev->handle);
2150
2151		if (conn->type == ACL_LINK) {
2152			conn->state = BT_CONFIG;
2153			hci_conn_hold(conn);
2154
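			/* Incoming legacy (non-SSP) connections with no
			 * stored link key are expected to pair, so give
			 * them the longer pairing timeout.
			 */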
2155			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2156			    !hci_find_link_key(hdev, &ev->bdaddr))
2157				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2158			else
2159				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2160		} else
2161			conn->state = BT_CONNECTED;
2162
2163		hci_debugfs_create_conn(conn);
2164		hci_conn_add_sysfs(conn);
2165
2166		if (test_bit(HCI_AUTH, &hdev->flags))
2167			set_bit(HCI_CONN_AUTH, &conn->flags);
2168
2169		if (test_bit(HCI_ENCRYPT, &hdev->flags))
2170			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2171
2172		/* Get remote features */
2173		if (conn->type == ACL_LINK) {
2174			struct hci_cp_read_remote_features cp;
2175			cp.handle = ev->handle;
2176			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2177				     sizeof(cp), &cp);
2178
2179			hci_req_update_scan(hdev);
2180		}
2181
2182		/* Set packet type for incoming connection */
2183		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2184			struct hci_cp_change_conn_ptype cp;
2185			cp.handle = ev->handle;
2186			cp.pkt_type = cpu_to_le16(conn->pkt_type);
2187			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2188				     &cp);
2189		}
2190	} else {
2191		conn->state = BT_CLOSED;
2192		if (conn->type == ACL_LINK)
2193			mgmt_connect_failed(hdev, &conn->dst, conn->type,
2194					    conn->dst_type, ev->status);
2195	}
2196
2197	if (conn->type == ACL_LINK)
2198		hci_sco_setup(conn, ev->status);
2199
2200	if (ev->status) {
2201		hci_connect_cfm(conn, ev->status);
2202		hci_conn_del(conn);
2203	} else if (ev->link_type != ACL_LINK)
2204		hci_connect_cfm(conn, ev->status);
2205
2206unlock:
2207	hci_dev_unlock(hdev);
2208
2209	hci_conn_check_pending(hdev);
2210}
2211
2212static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2213{
2214	struct hci_cp_reject_conn_req cp;
2215
2216	bacpy(&cp.bdaddr, bdaddr);
2217	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2218	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2219}
2220
2221static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2222{
2223	struct hci_ev_conn_request *ev = (void *) skb->data;
2224	int mask = hdev->link_mode;
2225	struct inquiry_entry *ie;
2226	struct hci_conn *conn;
2227	__u8 flags = 0;
2228
2229	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2230	       ev->link_type);
2231
2232	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2233				      &flags);
2234
2235	if (!(mask & HCI_LM_ACCEPT)) {
2236		hci_reject_conn(hdev, &ev->bdaddr);
2237		return;
2238	}
2239
2240	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2241				   BDADDR_BREDR)) {
2242		hci_reject_conn(hdev, &ev->bdaddr);
2243		return;
2244	}
2245
2246	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
2247	 * connection. These features are only touched through mgmt so
2248	 * only do the checks if HCI_MGMT is set.
2249	 */
2250	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2251	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2252	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2253				    BDADDR_BREDR)) {
2254		hci_reject_conn(hdev, &ev->bdaddr);
2255		return;
2256	}
2257
2258	/* Connection accepted */
2259
2260	hci_dev_lock(hdev);
2261
2262	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2263	if (ie)
2264		memcpy(ie->data.dev_class, ev->dev_class, 3);
2265
2266	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2267			&ev->bdaddr);
2268	if (!conn) {
2269		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2270				    HCI_ROLE_SLAVE);
2271		if (!conn) {
2272			BT_ERR("No memory for new connection");
2273			hci_dev_unlock(hdev);
2274			return;
2275		}
2276	}
2277
2278	memcpy(conn->dev_class, ev->dev_class, 3);
2279
2280	hci_dev_unlock(hdev);
2281
2282	if (ev->link_type == ACL_LINK ||
2283	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2284		struct hci_cp_accept_conn_req cp;
2285		conn->state = BT_CONNECT;
2286
2287		bacpy(&cp.bdaddr, &ev->bdaddr);
2288
2289		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2290			cp.role = 0x00; /* Become master */
2291		else
2292			cp.role = 0x01; /* Remain slave */
2293
2294		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2295	} else if (!(flags & HCI_PROTO_DEFER)) {
2296		struct hci_cp_accept_sync_conn_req cp;
2297		conn->state = BT_CONNECT;
2298
2299		bacpy(&cp.bdaddr, &ev->bdaddr);
2300		cp.pkt_type = cpu_to_le16(conn->pkt_type);
2301
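		/* 0x1f40 is 8000 bytes/sec, i.e. 64 kbit/s voice
		 * bandwidth; max_latency 0xffff and retrans_effort 0xff
		 * both mean "don't care".
		 */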
2302		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
2303		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
2304		cp.max_latency    = cpu_to_le16(0xffff);
2305		cp.content_format = cpu_to_le16(hdev->voice_setting);
2306		cp.retrans_effort = 0xff;
2307
2308		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2309			     &cp);
2310	} else {
2311		conn->state = BT_CONNECT2;
2312		hci_connect_cfm(conn, 0);
2313	}
2314}
2315
2316static u8 hci_to_mgmt_reason(u8 err)
2317{
2318	switch (err) {
2319	case HCI_ERROR_CONNECTION_TIMEOUT:
2320		return MGMT_DEV_DISCONN_TIMEOUT;
2321	case HCI_ERROR_REMOTE_USER_TERM:
2322	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2323	case HCI_ERROR_REMOTE_POWER_OFF:
2324		return MGMT_DEV_DISCONN_REMOTE;
2325	case HCI_ERROR_LOCAL_HOST_TERM:
2326		return MGMT_DEV_DISCONN_LOCAL_HOST;
2327	default:
2328		return MGMT_DEV_DISCONN_UNKNOWN;
2329	}
2330}
2331
2332static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2333{
2334	struct hci_ev_disconn_complete *ev = (void *) skb->data;
2335	u8 reason = hci_to_mgmt_reason(ev->reason);
2336	struct hci_conn_params *params;
2337	struct hci_conn *conn;
2338	bool mgmt_connected;
2339	u8 type;
2340
2341	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2342
2343	hci_dev_lock(hdev);
2344
2345	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2346	if (!conn)
2347		goto unlock;
2348
2349	if (ev->status) {
2350		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2351				       conn->dst_type, ev->status);
2352		goto unlock;
2353	}
2354
2355	conn->state = BT_CLOSED;
2356
2357	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2358	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2359				reason, mgmt_connected);
2360
2361	if (conn->type == ACL_LINK) {
2362		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2363			hci_remove_link_key(hdev, &conn->dst);
2364
2365		hci_req_update_scan(hdev);
2366	}
2367
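	/* If the peer was set up for automatic connection, put it back
	 * on the pending LE connection list so that the background scan
	 * can re-establish the link.
	 */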
2368	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2369	if (params) {
2370		switch (params->auto_connect) {
2371		case HCI_AUTO_CONN_LINK_LOSS:
2372			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2373				break;
2374			/* Fall through */
2375
2376		case HCI_AUTO_CONN_DIRECT:
2377		case HCI_AUTO_CONN_ALWAYS:
2378			list_del_init(&params->action);
2379			list_add(&params->action, &hdev->pend_le_conns);
2380			hci_update_background_scan(hdev);
2381			break;
2382
2383		default:
2384			break;
2385		}
2386	}
2387
2388	type = conn->type;
2389
2390	hci_disconn_cfm(conn, ev->reason);
2391	hci_conn_del(conn);
2392
2393	/* Re-enable advertising if necessary, since it might
2394	 * have been disabled by the connection. From the
2395	 * HCI_LE_Set_Advertise_Enable command description in
2396	 * the core specification (v4.0):
2397	 * "The Controller shall continue advertising until the Host
2398	 * issues an LE_Set_Advertise_Enable command with
2399	 * Advertising_Enable set to 0x00 (Advertising is disabled)
2400	 * or until a connection is created or until the Advertising
2401	 * is timed out due to Directed Advertising."
2402	 */
2403	if (type == LE_LINK)
2404		hci_req_reenable_advertising(hdev);
2405
2406unlock:
2407	hci_dev_unlock(hdev);
2408}
2409
2410static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2411{
2412	struct hci_ev_auth_complete *ev = (void *) skb->data;
2413	struct hci_conn *conn;
2414
2415	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2416
2417	hci_dev_lock(hdev);
2418
2419	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2420	if (!conn)
2421		goto unlock;
2422
2423	if (!ev->status) {
2424		if (!hci_conn_ssp_enabled(conn) &&
2425		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2426			BT_INFO("re-auth of legacy device is not possible.");
2427		} else {
2428			set_bit(HCI_CONN_AUTH, &conn->flags);
2429			conn->sec_level = conn->pending_sec_level;
2430		}
2431	} else {
2432		mgmt_auth_failed(conn, ev->status);
2433	}
2434
2435	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2436	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2437
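	/* For SSP based connections that are still being configured,
	 * successful authentication is followed by enabling encryption
	 * before connection completion is signalled.
	 */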
2438	if (conn->state == BT_CONFIG) {
2439		if (!ev->status && hci_conn_ssp_enabled(conn)) {
2440			struct hci_cp_set_conn_encrypt cp;
2441			cp.handle  = ev->handle;
2442			cp.encrypt = 0x01;
2443			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2444				     &cp);
2445		} else {
2446			conn->state = BT_CONNECTED;
2447			hci_connect_cfm(conn, ev->status);
2448			hci_conn_drop(conn);
2449		}
2450	} else {
2451		hci_auth_cfm(conn, ev->status);
2452
2453		hci_conn_hold(conn);
2454		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2455		hci_conn_drop(conn);
2456	}
2457
2458	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2459		if (!ev->status) {
2460			struct hci_cp_set_conn_encrypt cp;
2461			cp.handle  = ev->handle;
2462			cp.encrypt = 0x01;
2463			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2464				     &cp);
2465		} else {
2466			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2467			hci_encrypt_cfm(conn, ev->status, 0x00);
2468		}
2469	}
2470
2471unlock:
2472	hci_dev_unlock(hdev);
2473}
2474
2475static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476{
2477	struct hci_ev_remote_name *ev = (void *) skb->data;
2478	struct hci_conn *conn;
2479
2480	BT_DBG("%s", hdev->name);
2481
2482	hci_conn_check_pending(hdev);
2483
2484	hci_dev_lock(hdev);
2485
2486	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2487
2488	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2489		goto check_auth;
2490
2491	if (ev->status == 0)
2492		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2493				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2494	else
2495		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2496
2497check_auth:
2498	if (!conn)
2499		goto unlock;
2500
2501	if (!hci_outgoing_auth_needed(hdev, conn))
2502		goto unlock;
2503
2504	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2505		struct hci_cp_auth_requested cp;
2506
2507		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2508
2509		cp.handle = __cpu_to_le16(conn->handle);
2510		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2511	}
2512
2513unlock:
2514	hci_dev_unlock(hdev);
2515}
2516
2517static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2518				       u16 opcode, struct sk_buff *skb)
2519{
2520	const struct hci_rp_read_enc_key_size *rp;
2521	struct hci_conn *conn;
2522	u16 handle;
2523
2524	BT_DBG("%s status 0x%02x", hdev->name, status);
2525
2526	if (!skb || skb->len < sizeof(*rp)) {
2527		BT_ERR("%s invalid HCI Read Encryption Key Size response",
2528		       hdev->name);
2529		return;
2530	}
2531
2532	rp = (void *)skb->data;
2533	handle = le16_to_cpu(rp->handle);
2534
2535	hci_dev_lock(hdev);
2536
2537	conn = hci_conn_hash_lookup_handle(hdev, handle);
2538	if (!conn)
2539		goto unlock;
2540
2541	/* If we fail to read the encryption key size, assume the maximum
2542	 * (which is also what we do when this HCI command isn't
2543	 * supported).
2544	 */
2545	if (rp->status) {
2546		BT_ERR("%s failed to read key size for handle %u", hdev->name,
2547		       handle);
2548		conn->enc_key_size = HCI_LINK_KEY_SIZE;
2549	} else {
2550		conn->enc_key_size = rp->key_size;
2551	}
2552
2553	if (conn->state == BT_CONFIG) {
2554		conn->state = BT_CONNECTED;
2555		hci_connect_cfm(conn, 0);
2556		hci_conn_drop(conn);
2557	} else {
2558		u8 encrypt;
2559
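		/* Map the connection flags to the hci_encrypt_cfm()
		 * values: 0x00 off, 0x01 E0/legacy, 0x02 AES-CCM.
		 */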
2560		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2561			encrypt = 0x00;
2562		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2563			encrypt = 0x02;
2564		else
2565			encrypt = 0x01;
2566
2567		hci_encrypt_cfm(conn, 0, encrypt);
2568	}
2569
2570unlock:
2571	hci_dev_unlock(hdev);
2572}
2573
2574static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2575{
2576	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2577	struct hci_conn *conn;
2578
2579	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2580
2581	hci_dev_lock(hdev);
2582
2583	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2584	if (!conn)
2585		goto unlock;
2586
2587	if (!ev->status) {
2588		if (ev->encrypt) {
2589			/* Encryption implies authentication */
2590			set_bit(HCI_CONN_AUTH, &conn->flags);
2591			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2592			conn->sec_level = conn->pending_sec_level;
2593
2594			/* P-256 authentication key implies FIPS */
2595			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2596				set_bit(HCI_CONN_FIPS, &conn->flags);
2597
2598			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2599			    conn->type == LE_LINK)
2600				set_bit(HCI_CONN_AES_CCM, &conn->flags);
2601		} else {
2602			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2603			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2604		}
2605	}
2606
2607	/* We should disregard the current RPA and generate a new one
2608	 * whenever the encryption procedure fails.
2609	 */
2610	if (ev->status && conn->type == LE_LINK)
2611		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2612
2613	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2614
2615	if (ev->status && conn->state == BT_CONNECTED) {
2616		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2617		hci_conn_drop(conn);
2618		goto unlock;
2619	}
2620
2621	/* In Secure Connections Only mode, do not allow any connections
2622	 * that are not encrypted with AES-CCM using a P-256 authenticated
2623	 * combination key.
2624	 */
2625	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2626	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2627	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2628		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2629		hci_conn_drop(conn);
2630		goto unlock;
2631	}
2632
2633	/* Try reading the encryption key size for encrypted ACL links */
2634	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2635		struct hci_cp_read_enc_key_size cp;
2636		struct hci_request req;
2637
2638		/* Only send HCI_Read_Encryption_Key_Size if the
2639		 * controller really supports it. If it doesn't, assume
2640		 * the default size (16).
2641		 */
2642		if (!(hdev->commands[20] & 0x10)) {
2643			conn->enc_key_size = HCI_LINK_KEY_SIZE;
2644			goto notify;
2645		}
2646
2647		hci_req_init(&req, hdev);
2648
2649		cp.handle = cpu_to_le16(conn->handle);
2650		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2651
2652		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2653			BT_ERR("Sending HCI Read Encryption Key Size failed");
2654			conn->enc_key_size = HCI_LINK_KEY_SIZE;
2655			goto notify;
2656		}
2657
2658		goto unlock;
2659	}
2660
2661notify:
2662	if (conn->state == BT_CONFIG) {
2663		if (!ev->status)
2664			conn->state = BT_CONNECTED;
2665
2666		hci_connect_cfm(conn, ev->status);
2667		hci_conn_drop(conn);
2668	} else
2669		hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2670
2671unlock:
2672	hci_dev_unlock(hdev);
2673}
2674
2675static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2676					     struct sk_buff *skb)
2677{
2678	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2679	struct hci_conn *conn;
2680
2681	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2682
2683	hci_dev_lock(hdev);
2684
2685	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2686	if (conn) {
2687		if (!ev->status)
2688			set_bit(HCI_CONN_SECURE, &conn->flags);
2689
2690		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2691
2692		hci_key_change_cfm(conn, ev->status);
2693	}
2694
2695	hci_dev_unlock(hdev);
2696}
2697
2698static void hci_remote_features_evt(struct hci_dev *hdev,
2699				    struct sk_buff *skb)
2700{
2701	struct hci_ev_remote_features *ev = (void *) skb->data;
2702	struct hci_conn *conn;
2703
2704	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2705
2706	hci_dev_lock(hdev);
2707
2708	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2709	if (!conn)
2710		goto unlock;
2711
2712	if (!ev->status)
2713		memcpy(conn->features[0], ev->features, 8);
2714
2715	if (conn->state != BT_CONFIG)
2716		goto unlock;
2717
2718	if (!ev->status && lmp_ext_feat_capable(hdev) &&
2719	    lmp_ext_feat_capable(conn)) {
2720		struct hci_cp_read_remote_ext_features cp;
2721		cp.handle = ev->handle;
2722		cp.page = 0x01;
2723		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2724			     sizeof(cp), &cp);
2725		goto unlock;
2726	}
2727
2728	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2729		struct hci_cp_remote_name_req cp;
2730		memset(&cp, 0, sizeof(cp));
2731		bacpy(&cp.bdaddr, &conn->dst);
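		/* 0x02 is the R2 page scan repetition mode */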
2732		cp.pscan_rep_mode = 0x02;
2733		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2734	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2735		mgmt_device_connected(hdev, conn, 0, NULL, 0);
2736
2737	if (!hci_outgoing_auth_needed(hdev, conn)) {
2738		conn->state = BT_CONNECTED;
2739		hci_connect_cfm(conn, ev->status);
2740		hci_conn_drop(conn);
2741	}
2742
2743unlock:
2744	hci_dev_unlock(hdev);
2745}
2746
2747static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2748				 u16 *opcode, u8 *status,
2749				 hci_req_complete_t *req_complete,
2750				 hci_req_complete_skb_t *req_complete_skb)
2751{
2752	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2753
2754	*opcode = __le16_to_cpu(ev->opcode);
2755	*status = skb->data[sizeof(*ev)];
2756
2757	skb_pull(skb, sizeof(*ev));
2758
2759	switch (*opcode) {
2760	case HCI_OP_INQUIRY_CANCEL:
2761		hci_cc_inquiry_cancel(hdev, skb);
2762		break;
2763
2764	case HCI_OP_PERIODIC_INQ:
2765		hci_cc_periodic_inq(hdev, skb);
2766		break;
2767
2768	case HCI_OP_EXIT_PERIODIC_INQ:
2769		hci_cc_exit_periodic_inq(hdev, skb);
2770		break;
2771
2772	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2773		hci_cc_remote_name_req_cancel(hdev, skb);
2774		break;
2775
2776	case HCI_OP_ROLE_DISCOVERY:
2777		hci_cc_role_discovery(hdev, skb);
2778		break;
2779
2780	case HCI_OP_READ_LINK_POLICY:
2781		hci_cc_read_link_policy(hdev, skb);
2782		break;
2783
2784	case HCI_OP_WRITE_LINK_POLICY:
2785		hci_cc_write_link_policy(hdev, skb);
2786		break;
2787
2788	case HCI_OP_READ_DEF_LINK_POLICY:
2789		hci_cc_read_def_link_policy(hdev, skb);
2790		break;
2791
2792	case HCI_OP_WRITE_DEF_LINK_POLICY:
2793		hci_cc_write_def_link_policy(hdev, skb);
2794		break;
2795
2796	case HCI_OP_RESET:
2797		hci_cc_reset(hdev, skb);
2798		break;
2799
2800	case HCI_OP_READ_STORED_LINK_KEY:
2801		hci_cc_read_stored_link_key(hdev, skb);
2802		break;
2803
2804	case HCI_OP_DELETE_STORED_LINK_KEY:
2805		hci_cc_delete_stored_link_key(hdev, skb);
2806		break;
2807
2808	case HCI_OP_WRITE_LOCAL_NAME:
2809		hci_cc_write_local_name(hdev, skb);
2810		break;
2811
2812	case HCI_OP_READ_LOCAL_NAME:
2813		hci_cc_read_local_name(hdev, skb);
2814		break;
2815
2816	case HCI_OP_WRITE_AUTH_ENABLE:
2817		hci_cc_write_auth_enable(hdev, skb);
2818		break;
2819
2820	case HCI_OP_WRITE_ENCRYPT_MODE:
2821		hci_cc_write_encrypt_mode(hdev, skb);
2822		break;
2823
2824	case HCI_OP_WRITE_SCAN_ENABLE:
2825		hci_cc_write_scan_enable(hdev, skb);
2826		break;
2827
2828	case HCI_OP_READ_CLASS_OF_DEV:
2829		hci_cc_read_class_of_dev(hdev, skb);
2830		break;
2831
2832	case HCI_OP_WRITE_CLASS_OF_DEV:
2833		hci_cc_write_class_of_dev(hdev, skb);
2834		break;
2835
2836	case HCI_OP_READ_VOICE_SETTING:
2837		hci_cc_read_voice_setting(hdev, skb);
2838		break;
2839
2840	case HCI_OP_WRITE_VOICE_SETTING:
2841		hci_cc_write_voice_setting(hdev, skb);
2842		break;
2843
2844	case HCI_OP_READ_NUM_SUPPORTED_IAC:
2845		hci_cc_read_num_supported_iac(hdev, skb);
2846		break;
2847
2848	case HCI_OP_WRITE_SSP_MODE:
2849		hci_cc_write_ssp_mode(hdev, skb);
2850		break;
2851
2852	case HCI_OP_WRITE_SC_SUPPORT:
2853		hci_cc_write_sc_support(hdev, skb);
2854		break;
2855
2856	case HCI_OP_READ_LOCAL_VERSION:
2857		hci_cc_read_local_version(hdev, skb);
2858		break;
2859
2860	case HCI_OP_READ_LOCAL_COMMANDS:
2861		hci_cc_read_local_commands(hdev, skb);
2862		break;
2863
2864	case HCI_OP_READ_LOCAL_FEATURES:
2865		hci_cc_read_local_features(hdev, skb);
2866		break;
2867
2868	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2869		hci_cc_read_local_ext_features(hdev, skb);
2870		break;
2871
2872	case HCI_OP_READ_BUFFER_SIZE:
2873		hci_cc_read_buffer_size(hdev, skb);
2874		break;
2875
2876	case HCI_OP_READ_BD_ADDR:
2877		hci_cc_read_bd_addr(hdev, skb);
2878		break;
2879
2880	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2881		hci_cc_read_page_scan_activity(hdev, skb);
2882		break;
2883
2884	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2885		hci_cc_write_page_scan_activity(hdev, skb);
2886		break;
2887
2888	case HCI_OP_READ_PAGE_SCAN_TYPE:
2889		hci_cc_read_page_scan_type(hdev, skb);
2890		break;
2891
2892	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2893		hci_cc_write_page_scan_type(hdev, skb);
2894		break;
2895
2896	case HCI_OP_READ_DATA_BLOCK_SIZE:
2897		hci_cc_read_data_block_size(hdev, skb);
2898		break;
2899
2900	case HCI_OP_READ_FLOW_CONTROL_MODE:
2901		hci_cc_read_flow_control_mode(hdev, skb);
2902		break;
2903
2904	case HCI_OP_READ_LOCAL_AMP_INFO:
2905		hci_cc_read_local_amp_info(hdev, skb);
2906		break;
2907
2908	case HCI_OP_READ_CLOCK:
2909		hci_cc_read_clock(hdev, skb);
2910		break;
2911
2912	case HCI_OP_READ_INQ_RSP_TX_POWER:
2913		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2914		break;
2915
2916	case HCI_OP_PIN_CODE_REPLY:
2917		hci_cc_pin_code_reply(hdev, skb);
2918		break;
2919
2920	case HCI_OP_PIN_CODE_NEG_REPLY:
2921		hci_cc_pin_code_neg_reply(hdev, skb);
2922		break;
2923
2924	case HCI_OP_READ_LOCAL_OOB_DATA:
2925		hci_cc_read_local_oob_data(hdev, skb);
2926		break;
2927
2928	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2929		hci_cc_read_local_oob_ext_data(hdev, skb);
2930		break;
2931
2932	case HCI_OP_LE_READ_BUFFER_SIZE:
2933		hci_cc_le_read_buffer_size(hdev, skb);
2934		break;
2935
2936	case HCI_OP_LE_READ_LOCAL_FEATURES:
2937		hci_cc_le_read_local_features(hdev, skb);
2938		break;
2939
2940	case HCI_OP_LE_READ_ADV_TX_POWER:
2941		hci_cc_le_read_adv_tx_power(hdev, skb);
2942		break;
2943
2944	case HCI_OP_USER_CONFIRM_REPLY:
2945		hci_cc_user_confirm_reply(hdev, skb);
2946		break;
2947
2948	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2949		hci_cc_user_confirm_neg_reply(hdev, skb);
2950		break;
2951
2952	case HCI_OP_USER_PASSKEY_REPLY:
2953		hci_cc_user_passkey_reply(hdev, skb);
2954		break;
2955
2956	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2957		hci_cc_user_passkey_neg_reply(hdev, skb);
2958		break;
2959
2960	case HCI_OP_LE_SET_RANDOM_ADDR:
2961		hci_cc_le_set_random_addr(hdev, skb);
2962		break;
2963
2964	case HCI_OP_LE_SET_ADV_ENABLE:
2965		hci_cc_le_set_adv_enable(hdev, skb);
2966		break;
2967
2968	case HCI_OP_LE_SET_SCAN_PARAM:
2969		hci_cc_le_set_scan_param(hdev, skb);
2970		break;
2971
2972	case HCI_OP_LE_SET_SCAN_ENABLE:
2973		hci_cc_le_set_scan_enable(hdev, skb);
2974		break;
2975
2976	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2977		hci_cc_le_read_white_list_size(hdev, skb);
2978		break;
2979
2980	case HCI_OP_LE_CLEAR_WHITE_LIST:
2981		hci_cc_le_clear_white_list(hdev, skb);
2982		break;
2983
2984	case HCI_OP_LE_ADD_TO_WHITE_LIST:
2985		hci_cc_le_add_to_white_list(hdev, skb);
2986		break;
2987
2988	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2989		hci_cc_le_del_from_white_list(hdev, skb);
2990		break;
2991
2992	case HCI_OP_LE_READ_SUPPORTED_STATES:
2993		hci_cc_le_read_supported_states(hdev, skb);
2994		break;
2995
2996	case HCI_OP_LE_READ_DEF_DATA_LEN:
2997		hci_cc_le_read_def_data_len(hdev, skb);
2998		break;
2999
3000	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3001		hci_cc_le_write_def_data_len(hdev, skb);
3002		break;
3003
3004	case HCI_OP_LE_READ_MAX_DATA_LEN:
3005		hci_cc_le_read_max_data_len(hdev, skb);
3006		break;
3007
3008	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3009		hci_cc_write_le_host_supported(hdev, skb);
3010		break;
3011
3012	case HCI_OP_LE_SET_ADV_PARAM:
3013		hci_cc_set_adv_param(hdev, skb);
3014		break;
3015
3016	case HCI_OP_READ_RSSI:
3017		hci_cc_read_rssi(hdev, skb);
3018		break;
3019
3020	case HCI_OP_READ_TX_POWER:
3021		hci_cc_read_tx_power(hdev, skb);
3022		break;
3023
3024	case HCI_OP_WRITE_SSP_DEBUG_MODE:
3025		hci_cc_write_ssp_debug_mode(hdev, skb);
3026		break;
3027
3028	default:
3029		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3030		break;
3031	}
3032
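	/* A Command Complete for the HCI_OP_NOP opcode is generated by
	 * the controller on its own, not in response to a command we
	 * sent, so the command timer must keep running in that case.
	 */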
3033	if (*opcode != HCI_OP_NOP)
3034		cancel_delayed_work(&hdev->cmd_timer);
3035
3036	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3037		atomic_set(&hdev->cmd_cnt, 1);
3038
3039	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3040			     req_complete_skb);
3041
3042	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3043		queue_work(hdev->workqueue, &hdev->cmd_work);
3044}
3045
3046static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3047			       u16 *opcode, u8 *status,
3048			       hci_req_complete_t *req_complete,
3049			       hci_req_complete_skb_t *req_complete_skb)
3050{
3051	struct hci_ev_cmd_status *ev = (void *) skb->data;
3052
3053	skb_pull(skb, sizeof(*ev));
3054
3055	*opcode = __le16_to_cpu(ev->opcode);
3056	*status = ev->status;
3057
3058	switch (*opcode) {
3059	case HCI_OP_INQUIRY:
3060		hci_cs_inquiry(hdev, ev->status);
3061		break;
3062
3063	case HCI_OP_CREATE_CONN:
3064		hci_cs_create_conn(hdev, ev->status);
3065		break;
3066
3067	case HCI_OP_DISCONNECT:
3068		hci_cs_disconnect(hdev, ev->status);
3069		break;
3070
3071	case HCI_OP_ADD_SCO:
3072		hci_cs_add_sco(hdev, ev->status);
3073		break;
3074
3075	case HCI_OP_AUTH_REQUESTED:
3076		hci_cs_auth_requested(hdev, ev->status);
3077		break;
3078
3079	case HCI_OP_SET_CONN_ENCRYPT:
3080		hci_cs_set_conn_encrypt(hdev, ev->status);
3081		break;
3082
3083	case HCI_OP_REMOTE_NAME_REQ:
3084		hci_cs_remote_name_req(hdev, ev->status);
3085		break;
3086
3087	case HCI_OP_READ_REMOTE_FEATURES:
3088		hci_cs_read_remote_features(hdev, ev->status);
3089		break;
3090
3091	case HCI_OP_READ_REMOTE_EXT_FEATURES:
3092		hci_cs_read_remote_ext_features(hdev, ev->status);
3093		break;
3094
3095	case HCI_OP_SETUP_SYNC_CONN:
3096		hci_cs_setup_sync_conn(hdev, ev->status);
3097		break;
3098
3099	case HCI_OP_SNIFF_MODE:
3100		hci_cs_sniff_mode(hdev, ev->status);
3101		break;
3102
3103	case HCI_OP_EXIT_SNIFF_MODE:
3104		hci_cs_exit_sniff_mode(hdev, ev->status);
3105		break;
3106
3107	case HCI_OP_SWITCH_ROLE:
3108		hci_cs_switch_role(hdev, ev->status);
3109		break;
3110
3111	case HCI_OP_LE_CREATE_CONN:
3112		hci_cs_le_create_conn(hdev, ev->status);
3113		break;
3114
3115	case HCI_OP_LE_READ_REMOTE_FEATURES:
3116		hci_cs_le_read_remote_features(hdev, ev->status);
3117		break;
3118
3119	case HCI_OP_LE_START_ENC:
3120		hci_cs_le_start_enc(hdev, ev->status);
3121		break;
3122
3123	default:
3124		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3125		break;
3126	}
3127
3128	if (*opcode != HCI_OP_NOP)
3129		cancel_delayed_work(&hdev->cmd_timer);
3130
3131	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3132		atomic_set(&hdev->cmd_cnt, 1);
3133
3134	/* Indicate request completion if the command failed. Also, if
3135	 * we're not waiting for a special event and we get a success
3136	 * command status, we should try to flag the request as completed
3137	 * (since for this kind of command there will not be a command
3138	 * complete event).
3139	 */
3140	if (ev->status ||
3141	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3142		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3143				     req_complete_skb);
3144
3145	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3146		queue_work(hdev->workqueue, &hdev->cmd_work);
3147}
3148
3149static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3150{
3151	struct hci_ev_hardware_error *ev = (void *) skb->data;
3152
3153	hdev->hw_error_code = ev->code;
3154
3155	queue_work(hdev->req_workqueue, &hdev->error_reset);
3156}
3157
3158static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3159{
3160	struct hci_ev_role_change *ev = (void *) skb->data;
3161	struct hci_conn *conn;
3162
3163	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3164
3165	hci_dev_lock(hdev);
3166
3167	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3168	if (conn) {
3169		if (!ev->status)
3170			conn->role = ev->role;
3171
3172		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3173
3174		hci_role_switch_cfm(conn, ev->status, ev->role);
3175	}
3176
3177	hci_dev_unlock(hdev);
3178}
3179
3180static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3181{
3182	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3183	int i;
3184
3185	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3186		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3187		return;
3188	}
3189
3190	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3191	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3192		BT_DBG("%s bad parameters", hdev->name);
3193		return;
3194	}
3195
3196	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3197
3198	for (i = 0; i < ev->num_hndl; i++) {
3199		struct hci_comp_pkts_info *info = &ev->handles[i];
3200		struct hci_conn *conn;
3201		__u16  handle, count;
3202
3203		handle = __le16_to_cpu(info->handle);
3204		count  = __le16_to_cpu(info->count);
3205
3206		conn = hci_conn_hash_lookup_handle(hdev, handle);
3207		if (!conn)
3208			continue;
3209
3210		conn->sent -= count;
3211
3212		switch (conn->type) {
3213		case ACL_LINK:
3214			hdev->acl_cnt += count;
3215			if (hdev->acl_cnt > hdev->acl_pkts)
3216				hdev->acl_cnt = hdev->acl_pkts;
3217			break;
3218
3219		case LE_LINK:
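			/* Controllers without a dedicated LE buffer
			 * pool (le_pkts == 0) share the ACL credits.
			 */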
3220			if (hdev->le_pkts) {
3221				hdev->le_cnt += count;
3222				if (hdev->le_cnt > hdev->le_pkts)
3223					hdev->le_cnt = hdev->le_pkts;
3224			} else {
3225				hdev->acl_cnt += count;
3226				if (hdev->acl_cnt > hdev->acl_pkts)
3227					hdev->acl_cnt = hdev->acl_pkts;
3228			}
3229			break;
3230
3231		case SCO_LINK:
3232			hdev->sco_cnt += count;
3233			if (hdev->sco_cnt > hdev->sco_pkts)
3234				hdev->sco_cnt = hdev->sco_pkts;
3235			break;
3236
3237		default:
3238			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3239			break;
3240		}
3241	}
3242
3243	queue_work(hdev->workqueue, &hdev->tx_work);
3244}
3245
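/* AMP controllers identify logical links by their channel handle
 * rather than by a connection handle, hence the per-type lookup.
 */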
3246static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3247						 __u16 handle)
3248{
3249	struct hci_chan *chan;
3250
3251	switch (hdev->dev_type) {
3252	case HCI_BREDR:
3253		return hci_conn_hash_lookup_handle(hdev, handle);
3254	case HCI_AMP:
3255		chan = hci_chan_lookup_handle(hdev, handle);
3256		if (chan)
3257			return chan->conn;
3258		break;
3259	default:
3260		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3261		break;
3262	}
3263
3264	return NULL;
3265}
3266
3267static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3268{
3269	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3270	int i;
3271
3272	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3273		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3274		return;
3275	}
3276
3277	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3278	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3279		BT_DBG("%s bad parameters", hdev->name);
3280		return;
3281	}
3282
3283	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3284	       ev->num_hndl);
3285
3286	for (i = 0; i < ev->num_hndl; i++) {
3287		struct hci_comp_blocks_info *info = &ev->handles[i];
3288		struct hci_conn *conn = NULL;
3289		__u16  handle, block_count;
3290
3291		handle = __le16_to_cpu(info->handle);
3292		block_count = __le16_to_cpu(info->blocks);
3293
3294		conn = __hci_conn_lookup_handle(hdev, handle);
3295		if (!conn)
3296			continue;
3297
3298		conn->sent -= block_count;
3299
3300		switch (conn->type) {
3301		case ACL_LINK:
3302		case AMP_LINK:
3303			hdev->block_cnt += block_count;
3304			if (hdev->block_cnt > hdev->num_blocks)
3305				hdev->block_cnt = hdev->num_blocks;
3306			break;
3307
3308		default:
3309			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3310			break;
3311		}
3312	}
3313
3314	queue_work(hdev->workqueue, &hdev->tx_work);
3315}
3316
3317static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3318{
3319	struct hci_ev_mode_change *ev = (void *) skb->data;
3320	struct hci_conn *conn;
3321
3322	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3323
3324	hci_dev_lock(hdev);
3325
3326	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3327	if (conn) {
3328		conn->mode = ev->mode;
3329
3330		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3331					&conn->flags)) {
3332			if (conn->mode == HCI_CM_ACTIVE)
3333				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3334			else
3335				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3336		}
3337
3338		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3339			hci_sco_setup(conn, ev->status);
3340	}
3341
3342	hci_dev_unlock(hdev);
3343}
3344
3345static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3346{
3347	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3348	struct hci_conn *conn;
3349
3350	BT_DBG("%s", hdev->name);
3351
3352	hci_dev_lock(hdev);
3353
3354	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3355	if (!conn)
3356		goto unlock;
3357
3358	if (conn->state == BT_CONNECTED) {
3359		hci_conn_hold(conn);
3360		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3361		hci_conn_drop(conn);
3362	}
3363
3364	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3365	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3366		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3367			     sizeof(ev->bdaddr), &ev->bdaddr);
3368	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3369		u8 secure;
3370
3371		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3372			secure = 1;
3373		else
3374			secure = 0;
3375
3376		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3377	}
3378
3379unlock:
3380	hci_dev_unlock(hdev);
3381}
3382
3383static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3384{
3385	if (key_type == HCI_LK_CHANGED_COMBINATION)
3386		return;
3387
3388	conn->pin_length = pin_len;
3389	conn->key_type = key_type;
3390
3391	switch (key_type) {
3392	case HCI_LK_LOCAL_UNIT:
3393	case HCI_LK_REMOTE_UNIT:
3394	case HCI_LK_DEBUG_COMBINATION:
3395		return;
3396	case HCI_LK_COMBINATION:
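		/* A full 16 digit PIN is required for a legacy
		 * combination key to count as high security.
		 */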
3397		if (pin_len == 16)
3398			conn->pending_sec_level = BT_SECURITY_HIGH;
3399		else
3400			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3401		break;
3402	case HCI_LK_UNAUTH_COMBINATION_P192:
3403	case HCI_LK_UNAUTH_COMBINATION_P256:
3404		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3405		break;
3406	case HCI_LK_AUTH_COMBINATION_P192:
3407		conn->pending_sec_level = BT_SECURITY_HIGH;
3408		break;
3409	case HCI_LK_AUTH_COMBINATION_P256:
3410		conn->pending_sec_level = BT_SECURITY_FIPS;
3411		break;
3412	}
3413}
3414
3415static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3416{
3417	struct hci_ev_link_key_req *ev = (void *) skb->data;
3418	struct hci_cp_link_key_reply cp;
3419	struct hci_conn *conn;
3420	struct link_key *key;
3421
3422	BT_DBG("%s", hdev->name);
3423
3424	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3425		return;
3426
3427	hci_dev_lock(hdev);
3428
3429	key = hci_find_link_key(hdev, &ev->bdaddr);
3430	if (!key) {
3431		BT_DBG("%s link key not found for %pMR", hdev->name,
3432		       &ev->bdaddr);
3433		goto not_found;
3434	}
3435
3436	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3437	       &ev->bdaddr);
3438
3439	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3440	if (conn) {
3441		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3442
3443		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3444		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3445		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3446			BT_DBG("%s ignoring unauthenticated key", hdev->name);
3447			goto not_found;
3448		}
3449
3450		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3451		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
3452		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
3453			BT_DBG("%s ignoring key unauthenticated for high security",
3454			       hdev->name);
3455			goto not_found;
3456		}
3457
3458		conn_set_key(conn, key->type, key->pin_len);
3459	}
3460
3461	bacpy(&cp.bdaddr, &ev->bdaddr);
3462	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3463
3464	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3465
3466	hci_dev_unlock(hdev);
3467
3468	return;
3469
3470not_found:
3471	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3472	hci_dev_unlock(hdev);
3473}
3474
3475static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3476{
3477	struct hci_ev_link_key_notify *ev = (void *) skb->data;
3478	struct hci_conn *conn;
3479	struct link_key *key;
3480	bool persistent;
3481	u8 pin_len = 0;
3482
3483	BT_DBG("%s", hdev->name);
3484
3485	hci_dev_lock(hdev);
3486
3487	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3488	if (!conn)
3489		goto unlock;
3490
3491	hci_conn_hold(conn);
3492	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3493	hci_conn_drop(conn);
3494
3495	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3496	conn_set_key(conn, ev->key_type, conn->pin_length);
3497
3498	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3499		goto unlock;
3500
3501	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3502			        ev->key_type, pin_len, &persistent);
3503	if (!key)
3504		goto unlock;
3505
3506	/* Update connection information since adding the key will have
3507	 * fixed up the type in the case of changed combination keys.
3508	 */
3509	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3510		conn_set_key(conn, key->type, key->pin_len);
3511
3512	mgmt_new_link_key(hdev, key, persistent);
3513
3514	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3515	 * is set. If it's not set simply remove the key from the kernel
3516	 * list (we've still notified user space about it but with
3517	 * store_hint being 0).
3518	 */
3519	if (key->type == HCI_LK_DEBUG_COMBINATION &&
3520	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3521		list_del_rcu(&key->list);
3522		kfree_rcu(key, rcu);
3523		goto unlock;
3524	}
3525
3526	if (persistent)
3527		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3528	else
3529		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3530
3531unlock:
3532	hci_dev_unlock(hdev);
3533}
3534
3535static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3536{
3537	struct hci_ev_clock_offset *ev = (void *) skb->data;
3538	struct hci_conn *conn;
3539
3540	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3541
3542	hci_dev_lock(hdev);
3543
3544	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3545	if (conn && !ev->status) {
3546		struct inquiry_entry *ie;
3547
3548		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3549		if (ie) {
3550			ie->data.clock_offset = ev->clock_offset;
3551			ie->timestamp = jiffies;
3552		}
3553	}
3554
3555	hci_dev_unlock(hdev);
3556}
3557
3558static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3559{
3560	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3561	struct hci_conn *conn;
3562
3563	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3564
3565	hci_dev_lock(hdev);
3566
3567	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3568	if (conn && !ev->status)
3569		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3570
3571	hci_dev_unlock(hdev);
3572}
3573
3574static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3575{
3576	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3577	struct inquiry_entry *ie;
3578
3579	BT_DBG("%s", hdev->name);
3580
3581	hci_dev_lock(hdev);
3582
3583	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3584	if (ie) {
3585		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3586		ie->timestamp = jiffies;
3587	}
3588
3589	hci_dev_unlock(hdev);
3590}
3591
3592static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3593					     struct sk_buff *skb)
3594{
3595	struct inquiry_data data;
3596	int num_rsp = *((__u8 *) skb->data);
3597
3598	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3599
3600	if (!num_rsp)
3601		return;
3602
3603	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3604		return;
3605
3606	hci_dev_lock(hdev);
3607
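	/* Two response layouts exist; some controllers append the
	 * legacy pscan_mode byte to each entry, so distinguish them
	 * by the per-response size.
	 */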
3608	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3609		struct inquiry_info_with_rssi_and_pscan_mode *info;
3610		info = (void *) (skb->data + 1);
3611
3612		for (; num_rsp; num_rsp--, info++) {
3613			u32 flags;
3614
3615			bacpy(&data.bdaddr, &info->bdaddr);
3616			data.pscan_rep_mode	= info->pscan_rep_mode;
3617			data.pscan_period_mode	= info->pscan_period_mode;
3618			data.pscan_mode		= info->pscan_mode;
3619			memcpy(data.dev_class, info->dev_class, 3);
3620			data.clock_offset	= info->clock_offset;
3621			data.rssi		= info->rssi;
3622			data.ssp_mode		= 0x00;
3623
3624			flags = hci_inquiry_cache_update(hdev, &data, false);
3625
3626			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3627					  info->dev_class, info->rssi,
3628					  flags, NULL, 0, NULL, 0);
3629		}
3630	} else {
3631		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3632
3633		for (; num_rsp; num_rsp--, info++) {
3634			u32 flags;
3635
3636			bacpy(&data.bdaddr, &info->bdaddr);
3637			data.pscan_rep_mode	= info->pscan_rep_mode;
3638			data.pscan_period_mode	= info->pscan_period_mode;
3639			data.pscan_mode		= 0x00;
3640			memcpy(data.dev_class, info->dev_class, 3);
3641			data.clock_offset	= info->clock_offset;
3642			data.rssi		= info->rssi;
3643			data.ssp_mode		= 0x00;
3644
3645			flags = hci_inquiry_cache_update(hdev, &data, false);
3646
3647			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3648					  info->dev_class, info->rssi,
3649					  flags, NULL, 0, NULL, 0);
3650		}
3651	}
3652
3653	hci_dev_unlock(hdev);
3654}
3655
3656static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3657					struct sk_buff *skb)
3658{
3659	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3660	struct hci_conn *conn;
3661
3662	BT_DBG("%s", hdev->name);
3663
3664	hci_dev_lock(hdev);
3665
3666	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3667	if (!conn)
3668		goto unlock;
3669
3670	if (ev->page < HCI_MAX_PAGES)
3671		memcpy(conn->features[ev->page], ev->features, 8);
3672
3673	if (!ev->status && ev->page == 0x01) {
3674		struct inquiry_entry *ie;
3675
3676		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3677		if (ie)
3678			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3679
3680		if (ev->features[0] & LMP_HOST_SSP) {
3681			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3682		} else {
3683			/* It is mandatory by the Bluetooth specification that
3684			 * Extended Inquiry Results are only used when Secure
3685			 * Simple Pairing is enabled, but some devices violate
3686			 * this.
3687			 *
3688			 * To make these devices work, the internal SSP
3689			 * enabled flag needs to be cleared if the remote host
3690			 * features do not indicate SSP support */
3691			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3692		}
3693
3694		if (ev->features[0] & LMP_HOST_SC)
3695			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3696	}
3697
3698	if (conn->state != BT_CONFIG)
3699		goto unlock;
3700
3701	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3702		struct hci_cp_remote_name_req cp;
3703		memset(&cp, 0, sizeof(cp));
3704		bacpy(&cp.bdaddr, &conn->dst);
3705		cp.pscan_rep_mode = 0x02;
3706		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3707	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3708		mgmt_device_connected(hdev, conn, 0, NULL, 0);
3709
3710	if (!hci_outgoing_auth_needed(hdev, conn)) {
3711		conn->state = BT_CONNECTED;
3712		hci_connect_cfm(conn, ev->status);
3713		hci_conn_drop(conn);
3714	}
3715
3716unlock:
3717	hci_dev_unlock(hdev);
3718}
3719
3720static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3721				       struct sk_buff *skb)
3722{
3723	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3724	struct hci_conn *conn;
3725
3726	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3727
3728	hci_dev_lock(hdev);
3729
3730	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3731	if (!conn) {
3732		if (ev->link_type == ESCO_LINK)
3733			goto unlock;
3734
3735		/* When the link type in the event indicates SCO connection
3736		 * and lookup of the connection object fails, then check
3737		 * if an eSCO connection object exists.
3738		 *
3739		 * The core limits the synchronous connections to either
3740		 * SCO or eSCO. The eSCO connection is preferred; it is
3741		 * attempted first, and until it is successfully established
3742		 * the link type will be hinted as eSCO.
3743		 */
3744		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3745		if (!conn)
3746			goto unlock;
3747	}
3748
3749	switch (ev->status) {
3750	case 0x00:
3751		conn->handle = __le16_to_cpu(ev->handle);
3752		conn->state  = BT_CONNECTED;
3753		conn->type   = ev->link_type;
3754
3755		hci_debugfs_create_conn(conn);
3756		hci_conn_add_sysfs(conn);
3757		break;
3758
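	/* These errors typically mean the remote side did not accept
	 * the attempted eSCO parameters; for outgoing connections fall
	 * back to a SCO compatible packet type and retry before giving
	 * up.
	 */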
3759	case 0x10:	/* Connection Accept Timeout */
3760	case 0x0d:	/* Connection Rejected due to Limited Resources */
3761	case 0x11:	/* Unsupported Feature or Parameter Value */
3762	case 0x1c:	/* SCO interval rejected */
3763	case 0x1a:	/* Unsupported Remote Feature */
3764	case 0x1f:	/* Unspecified error */
3765	case 0x20:	/* Unsupported LMP Parameter value */
3766		if (conn->out) {
3767			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3768					(hdev->esco_type & EDR_ESCO_MASK);
3769			if (hci_setup_sync(conn, conn->link->handle))
3770				goto unlock;
3771		}
3772		/* fall through */
3773
3774	default:
3775		conn->state = BT_CLOSED;
3776		break;
3777	}
3778
3779	hci_connect_cfm(conn, ev->status);
3780	if (ev->status)
3781		hci_conn_del(conn);
3782
3783unlock:
3784	hci_dev_unlock(hdev);
3785}
3786
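/* EIR data is a sequence of length prefixed fields; a field length
 * of zero terminates the significant part of the buffer.
 */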
3787static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3788{
3789	size_t parsed = 0;
3790
3791	while (parsed < eir_len) {
3792		u8 field_len = eir[0];
3793
3794		if (field_len == 0)
3795			return parsed;
3796
3797		parsed += field_len + 1;
3798		eir += field_len + 1;
3799	}
3800
3801	return eir_len;
3802}
3803
3804static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3805					    struct sk_buff *skb)
3806{
3807	struct inquiry_data data;
3808	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3809	int num_rsp = *((__u8 *) skb->data);
3810	size_t eir_len;
3811
3812	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3813
3814	if (!num_rsp)
3815		return;
3816
3817	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3818		return;
3819
3820	hci_dev_lock(hdev);
3821
3822	for (; num_rsp; num_rsp--, info++) {
3823		u32 flags;
3824		bool name_known;
3825
3826		bacpy(&data.bdaddr, &info->bdaddr);
3827		data.pscan_rep_mode	= info->pscan_rep_mode;
3828		data.pscan_period_mode	= info->pscan_period_mode;
3829		data.pscan_mode		= 0x00;
3830		memcpy(data.dev_class, info->dev_class, 3);
3831		data.clock_offset	= info->clock_offset;
3832		data.rssi		= info->rssi;
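		/* Extended Inquiry Results are only sent by devices
		 * with SSP enabled (see the mandate note in
		 * hci_remote_ext_features_evt), hence ssp_mode 0x01.
		 */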
3833		data.ssp_mode		= 0x01;
3834
3835		if (hci_dev_test_flag(hdev, HCI_MGMT))
3836			name_known = eir_get_data(info->data,
3837						  sizeof(info->data),
3838						  EIR_NAME_COMPLETE, NULL);
3839		else
3840			name_known = true;
3841
3842		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3843
3844		eir_len = eir_get_length(info->data, sizeof(info->data));
3845
3846		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3847				  info->dev_class, info->rssi,
3848				  flags, info->data, eir_len, NULL, 0);
3849	}
3850
3851	hci_dev_unlock(hdev);
3852}
3853
3854static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3855					 struct sk_buff *skb)
3856{
3857	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3858	struct hci_conn *conn;
3859
3860	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3861	       __le16_to_cpu(ev->handle));
3862
3863	hci_dev_lock(hdev);
3864
3865	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3866	if (!conn)
3867		goto unlock;
3868
3869	/* For BR/EDR the necessary steps are taken through the
3870	 * auth_complete event.
3871	 */
3872	if (conn->type != LE_LINK)
3873		goto unlock;
3874
3875	if (!ev->status)
3876		conn->sec_level = conn->pending_sec_level;
3877
3878	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3879
3880	if (ev->status && conn->state == BT_CONNECTED) {
3881		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3882		hci_conn_drop(conn);
3883		goto unlock;
3884	}
3885
3886	if (conn->state == BT_CONFIG) {
3887		if (!ev->status)
3888			conn->state = BT_CONNECTED;
3889
3890		hci_connect_cfm(conn, ev->status);
3891		hci_conn_drop(conn);
3892	} else {
3893		hci_auth_cfm(conn, ev->status);
3894
3895		hci_conn_hold(conn);
3896		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3897		hci_conn_drop(conn);
3898	}
3899
3900unlock:
3901	hci_dev_unlock(hdev);
3902}
3903
3904static u8 hci_get_auth_req(struct hci_conn *conn)
3905{
3906	/* If remote requests no-bonding follow that lead */
3907	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3908	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3909		return conn->remote_auth | (conn->auth_type & 0x01);
3910
3911	/* If both remote and local have enough IO capabilities, require
3912	 * MITM protection
3913	 */
3914	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3915	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3916		return conn->remote_auth | 0x01;
3917
3918	/* No MITM protection possible so ignore remote requirement */
3919	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3920}
3921
3922static u8 bredr_oob_data_present(struct hci_conn *conn)
3923{
3924	struct hci_dev *hdev = conn->hdev;
3925	struct oob_data *data;
3926
3927	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3928	if (!data)
3929		return 0x00;
3930
3931	if (bredr_sc_enabled(hdev)) {
3932		/* When Secure Connections is enabled, then just
3933		 * return the present value stored with the OOB
3934		 * data. The stored value contains the right present
3935		 * information. However, it can only be trusted when
3936		 * not in Secure Connection Only mode.
3937		 */
3938		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3939			return data->present;
3940
3941		/* When Secure Connections Only mode is enabled, then
3942		 * the P-256 values are required. If they are not
3943		 * available, then do not declare that OOB data is
3944		 * present.
3945		 */
3946		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3947		    !memcmp(data->hash256, ZERO_KEY, 16))
3948			return 0x00;
3949
3950		return 0x02;
3951	}
3952
3953	/* When Secure Connections is not enabled or actually
3954	 * not supported by the hardware, then check whether the
3955	 * P-192 data values are present.
3956	 */
3957	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3958	    !memcmp(data->hash192, ZERO_KEY, 16))
3959		return 0x00;
3960
3961	return 0x01;
3962}
3963
3964static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3965{
3966	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3967	struct hci_conn *conn;
3968
3969	BT_DBG("%s", hdev->name);
3970
3971	hci_dev_lock(hdev);
3972
3973	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3974	if (!conn)
3975		goto unlock;
3976
3977	hci_conn_hold(conn);
3978
3979	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3980		goto unlock;
3981
3982	/* Allow pairing if we're bondable, if we are the initiators of
3983	 * the pairing, or if the remote is not requesting bonding.
3984	 */
3985	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
3986	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3987	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3988		struct hci_cp_io_capability_reply cp;
3989
3990		bacpy(&cp.bdaddr, &ev->bdaddr);
3991		/* Change the IO capability from KeyboardDisplay
3992		 * to DisplayYesNo, as the BT spec does not support KeyboardDisplay here. */
3993		cp.capability = (conn->io_capability == 0x04) ?
3994				HCI_IO_DISPLAY_YESNO : conn->io_capability;
3995
3996		/* If we are initiators, there is no remote information yet */
3997		if (conn->remote_auth == 0xff) {
3998			/* Request MITM protection if our IO caps allow it
3999			 * except for the no-bonding case.
4000			 */
4001			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4002			    conn->auth_type != HCI_AT_NO_BONDING)
4003				conn->auth_type |= 0x01;
4004		} else {
4005			conn->auth_type = hci_get_auth_req(conn);
4006		}
4007
4008		/* If we're not bondable, force one of the non-bondable
4009		 * authentication requirement values.
4010		 */
4011		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4012			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4013
4014		cp.authentication = conn->auth_type;
4015		cp.oob_data = bredr_oob_data_present(conn);
4016
4017		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4018			     sizeof(cp), &cp);
4019	} else {
4020		struct hci_cp_io_capability_neg_reply cp;
4021
4022		bacpy(&cp.bdaddr, &ev->bdaddr);
4023		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4024
4025		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4026			     sizeof(cp), &cp);
4027	}
4028
4029unlock:
4030	hci_dev_unlock(hdev);
4031}
4032
4033static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4034{
4035	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4036	struct hci_conn *conn;
4037
4038	BT_DBG("%s", hdev->name);
4039
4040	hci_dev_lock(hdev);
4041
4042	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4043	if (!conn)
4044		goto unlock;
4045
4046	conn->remote_cap = ev->capability;
4047	conn->remote_auth = ev->authentication;
4048
4049unlock:
4050	hci_dev_unlock(hdev);
4051}
4052
4053static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4054					 struct sk_buff *skb)
4055{
4056	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4057	int loc_mitm, rem_mitm, confirm_hint = 0;
4058	struct hci_conn *conn;
4059
4060	BT_DBG("%s", hdev->name);
4061
4062	hci_dev_lock(hdev);
4063
4064	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4065		goto unlock;
4066
4067	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4068	if (!conn)
4069		goto unlock;
4070
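	/* Extract each side's MITM protection requirement from the low
	 * bit of its authentication requirements value.
	 */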
4071	loc_mitm = (conn->auth_type & 0x01);
4072	rem_mitm = (conn->remote_auth & 0x01);
4073
4074	/* If we require MITM but the remote device can't provide that
4075	 * (it has NoInputNoOutput) then reject the confirmation
4076	 * request. We check the security level here since it doesn't
4077	 * necessarily match conn->auth_type.
4078	 */
4079	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4080	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4081		BT_DBG("Rejecting request: remote device can't provide MITM");
4082		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4083			     sizeof(ev->bdaddr), &ev->bdaddr);
4084		goto unlock;
4085	}
4086
4087	/* If neither side requires MITM protection, auto-accept */
4088	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4089	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4090
4091		/* If we're not the initiators, request authorization to
4092		 * proceed from user space (mgmt_user_confirm with
4093		 * confirm_hint set to 1). The exception is if neither
4094		 * side requires MITM or if the local IO capability is
4095		 * NoInputNoOutput, in which case we auto-accept.
4096		 */
4097		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4098		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4099		    (loc_mitm || rem_mitm)) {
4100			BT_DBG("Confirming auto-accept as acceptor");
4101			confirm_hint = 1;
4102			goto confirm;
4103		}
4104
4105		BT_DBG("Auto-accept of user confirmation with %ums delay",
4106		       hdev->auto_accept_delay);
4107
4108		if (hdev->auto_accept_delay > 0) {
4109			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4110			queue_delayed_work(conn->hdev->workqueue,
4111					   &conn->auto_accept_work, delay);
4112			goto unlock;
4113		}
4114
4115		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4116			     sizeof(ev->bdaddr), &ev->bdaddr);
4117		goto unlock;
4118	}
4119
4120confirm:
4121	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4122				  le32_to_cpu(ev->passkey), confirm_hint);
4123
4124unlock:
4125	hci_dev_unlock(hdev);
4126}
4127
4128static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4129					 struct sk_buff *skb)
4130{
4131	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4132
4133	BT_DBG("%s", hdev->name);
4134
4135	if (hci_dev_test_flag(hdev, HCI_MGMT))
4136		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4137}
4138
4139static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4140					struct sk_buff *skb)
4141{
4142	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4143	struct hci_conn *conn;
4144
4145	BT_DBG("%s", hdev->name);
4146
4147	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4148	if (!conn)
4149		return;
4150
4151	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4152	conn->passkey_entered = 0;
4153
4154	if (hci_dev_test_flag(hdev, HCI_MGMT))
4155		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4156					 conn->dst_type, conn->passkey_notify,
4157					 conn->passkey_entered);
4158}
4159
4160static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4161{
4162	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4163	struct hci_conn *conn;
4164
4165	BT_DBG("%s", hdev->name);
4166
4167	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4168	if (!conn)
4169		return;
4170
4171	switch (ev->type) {
4172	case HCI_KEYPRESS_STARTED:
4173		conn->passkey_entered = 0;
4174		return;
4175
4176	case HCI_KEYPRESS_ENTERED:
4177		conn->passkey_entered++;
4178		break;
4179
4180	case HCI_KEYPRESS_ERASED:
4181		conn->passkey_entered--;
4182		break;
4183
4184	case HCI_KEYPRESS_CLEARED:
4185		conn->passkey_entered = 0;
4186		break;
4187
4188	case HCI_KEYPRESS_COMPLETED:
4189		return;
4190	}
4191
4192	if (hci_dev_test_flag(hdev, HCI_MGMT))
4193		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4194					 conn->dst_type, conn->passkey_notify,
4195					 conn->passkey_entered);
4196}
4197
4198static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4199					 struct sk_buff *skb)
4200{
4201	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4202	struct hci_conn *conn;
4203
4204	BT_DBG("%s", hdev->name);
4205
4206	hci_dev_lock(hdev);
4207
4208	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4209	if (!conn)
4210		goto unlock;
4211
4212	/* Reset the authentication requirement to unknown */
4213	conn->remote_auth = 0xff;
4214
4215	/* To avoid duplicate auth_failed events to user space we check
4216	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4217	 * initiated the authentication. A traditional auth_complete
4218	 * event is always produced as initiator and is also mapped to
4219	 * the mgmt_auth_failed event */
4220	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4221		mgmt_auth_failed(conn, ev->status);
4222
4223	hci_conn_drop(conn);
4224
4225unlock:
4226	hci_dev_unlock(hdev);
4227}
4228
4229static void hci_remote_host_features_evt(struct hci_dev *hdev,
4230					 struct sk_buff *skb)
4231{
4232	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4233	struct inquiry_entry *ie;
4234	struct hci_conn *conn;
4235
4236	BT_DBG("%s", hdev->name);
4237
4238	hci_dev_lock(hdev);
4239
4240	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4241	if (conn)
4242		memcpy(conn->features[1], ev->features, 8);
4243
4244	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4245	if (ie)
4246		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4247
4248	hci_dev_unlock(hdev);
4249}
4250
4251static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4252					    struct sk_buff *skb)
4253{
4254	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4255	struct oob_data *data;
4256
4257	BT_DBG("%s", hdev->name);
4258
4259	hci_dev_lock(hdev);
4260
4261	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4262		goto unlock;
4263
4264	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4265	if (!data) {
4266		struct hci_cp_remote_oob_data_neg_reply cp;
4267
4268		bacpy(&cp.bdaddr, &ev->bdaddr);
4269		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4270			     sizeof(cp), &cp);
4271		goto unlock;
4272	}
4273
4274	if (bredr_sc_enabled(hdev)) {
4275		struct hci_cp_remote_oob_ext_data_reply cp;
4276
4277		bacpy(&cp.bdaddr, &ev->bdaddr);
4278		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
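			/* In Secure Connections Only mode the legacy
			 * P-192 values must not be used, so send them
			 * zeroed out and rely on the P-256 values alone.
			 */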
4279			memset(cp.hash192, 0, sizeof(cp.hash192));
4280			memset(cp.rand192, 0, sizeof(cp.rand192));
4281		} else {
4282			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4283			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4284		}
4285		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4286		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4287
4288		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4289			     sizeof(cp), &cp);
4290	} else {
4291		struct hci_cp_remote_oob_data_reply cp;
4292
4293		bacpy(&cp.bdaddr, &ev->bdaddr);
4294		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4295		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4296
4297		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4298			     sizeof(cp), &cp);
4299	}
4300
4301unlock:
4302	hci_dev_unlock(hdev);
4303}
4304
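/* The handlers below deal with AMP (Bluetooth High Speed) physical and
 * logical links and are only compiled in when CONFIG_BT_HS is enabled.
 */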
4305#if IS_ENABLED(CONFIG_BT_HS)
4306static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4307{
4308	struct hci_ev_channel_selected *ev = (void *)skb->data;
4309	struct hci_conn *hcon;
4310
4311	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4312
4313	skb_pull(skb, sizeof(*ev));
4314
4315	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4316	if (!hcon)
4317		return;
4318
4319	amp_read_loc_assoc_final_data(hdev, hcon);
4320}
4321
4322static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4323				      struct sk_buff *skb)
4324{
4325	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4326	struct hci_conn *hcon, *bredr_hcon;
4327
4328	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4329	       ev->status);
4330
4331	hci_dev_lock(hdev);
4332
4333	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4334	if (!hcon) {
4335		hci_dev_unlock(hdev);
4336		return;
4337	}
4338
4339	if (ev->status) {
4340		hci_conn_del(hcon);
4341		hci_dev_unlock(hdev);
4342		return;
4343	}
4344
4345	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4346
4347	hcon->state = BT_CONNECTED;
4348	bacpy(&hcon->dst, &bredr_hcon->dst);
4349
4350	hci_conn_hold(hcon);
4351	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4352	hci_conn_drop(hcon);
4353
4354	hci_debugfs_create_conn(hcon);
4355	hci_conn_add_sysfs(hcon);
4356
4357	amp_physical_cfm(bredr_hcon, hcon);
4358
4359	hci_dev_unlock(hdev);
4360}
4361
4362static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4363{
4364	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4365	struct hci_conn *hcon;
4366	struct hci_chan *hchan;
4367	struct amp_mgr *mgr;
4368
4369	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4370	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4371	       ev->status);
4372
4373	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4374	if (!hcon)
4375		return;
4376
4377	/* Create AMP hchan */
4378	hchan = hci_chan_create(hcon);
4379	if (!hchan)
4380		return;
4381
4382	hchan->handle = le16_to_cpu(ev->handle);
4383
4384	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4385
4386	mgr = hcon->amp_mgr;
4387	if (mgr && mgr->bredr_chan) {
4388		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4389
4390		l2cap_chan_lock(bredr_chan);
4391
4392		bredr_chan->conn->mtu = hdev->block_mtu;
4393		l2cap_logical_cfm(bredr_chan, hchan, 0);
4394		hci_conn_hold(hcon);
4395
4396		l2cap_chan_unlock(bredr_chan);
4397	}
4398}
4399
4400static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4401					     struct sk_buff *skb)
4402{
4403	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4404	struct hci_chan *hchan;
4405
4406	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4407	       le16_to_cpu(ev->handle), ev->status);
4408
4409	if (ev->status)
4410		return;
4411
4412	hci_dev_lock(hdev);
4413
4414	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4415	if (!hchan)
4416		goto unlock;
4417
4418	amp_destroy_logical_link(hchan, ev->reason);
4419
4420unlock:
4421	hci_dev_unlock(hdev);
4422}
4423
4424static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4425					     struct sk_buff *skb)
4426{
4427	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4428	struct hci_conn *hcon;
4429
4430	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4431
4432	if (ev->status)
4433		return;
4434
4435	hci_dev_lock(hdev);
4436
4437	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4438	if (hcon) {
4439		hcon->state = BT_CLOSED;
4440		hci_conn_del(hcon);
4441	}
4442
4443	hci_dev_unlock(hdev);
4444}
4445#endif
4446
4447static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4448{
4449	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4450	struct hci_conn_params *params;
4451	struct hci_conn *conn;
4452	struct smp_irk *irk;
4453	u8 addr_type;
4454
4455	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4456
4457	hci_dev_lock(hdev);
4458
4459	/* All controllers implicitly stop advertising in the event of a
4460	 * connection, so ensure that the state bit is cleared.
4461	 */
4462	hci_dev_clear_flag(hdev, HCI_LE_ADV);
4463
4464	conn = hci_lookup_le_connect(hdev);
4465	if (!conn) {
4466		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4467		if (!conn) {
4468			BT_ERR("No memory for new connection");
4469			goto unlock;
4470		}
4471
4472		conn->dst_type = ev->bdaddr_type;
4473
4474		/* If we didn't have a hci_conn object previously
4475		 * but we're in master role, this must be something
4476		 * initiated using a white list. Since white list based
4477		 * connections are not "first class citizens" we don't
4478		 * have full tracking of them. Therefore, we go ahead
4479		 * with a "best effort" approach of determining the
4480		 * initiator address based on the HCI_PRIVACY flag.
4481		 */
4482		if (conn->out) {
4483			conn->resp_addr_type = ev->bdaddr_type;
4484			bacpy(&conn->resp_addr, &ev->bdaddr);
4485			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4486				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4487				bacpy(&conn->init_addr, &hdev->rpa);
4488			} else {
4489				hci_copy_identity_address(hdev,
4490							  &conn->init_addr,
4491							  &conn->init_addr_type);
4492			}
4493		}
4494	} else {
4495		cancel_delayed_work(&conn->le_conn_timeout);
4496	}
4497
4498	if (!conn->out) {
4499		/* Set the responder (our side) address type based on
4500		 * the advertising address type.
4501		 */
4502		conn->resp_addr_type = hdev->adv_addr_type;
4503		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4504			bacpy(&conn->resp_addr, &hdev->random_addr);
4505		else
4506			bacpy(&conn->resp_addr, &hdev->bdaddr);
4507
4508		conn->init_addr_type = ev->bdaddr_type;
4509		bacpy(&conn->init_addr, &ev->bdaddr);
4510
4511		/* For incoming connections, set the default minimum
4512		 * and maximum connection interval. They will be used
4513		 * to check if the parameters are in range and if not
4514		 * trigger the connection update procedure.
4515		 */
4516		conn->le_conn_min_interval = hdev->le_conn_min_interval;
4517		conn->le_conn_max_interval = hdev->le_conn_max_interval;
4518	}
4519
4520	/* Lookup the identity address from the stored connection
4521	 * address and address type.
4522	 *
4523	 * When establishing connections to an identity address, the
4524	 * connection procedure will store the resolvable random
4525	 * address first. Now if it can be converted back into the
4526	 * identity address, start using the identity address from
4527	 * now on.
4528	 */
4529	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4530	if (irk) {
4531		bacpy(&conn->dst, &irk->bdaddr);
4532		conn->dst_type = irk->addr_type;
4533	}
4534
4535	if (ev->status) {
4536		hci_le_conn_failed(conn, ev->status);
4537		goto unlock;
4538	}
4539
4540	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4541		addr_type = BDADDR_LE_PUBLIC;
4542	else
4543		addr_type = BDADDR_LE_RANDOM;
4544
4545	/* Drop the connection if the device is blocked */
4546	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4547		hci_conn_drop(conn);
4548		goto unlock;
4549	}
4550
4551	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4552		mgmt_device_connected(hdev, conn, 0, NULL, 0);
4553
4554	conn->sec_level = BT_SECURITY_LOW;
4555	conn->handle = __le16_to_cpu(ev->handle);
4556	conn->state = BT_CONFIG;
4557
4558	conn->le_conn_interval = le16_to_cpu(ev->interval);
4559	conn->le_conn_latency = le16_to_cpu(ev->latency);
4560	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4561
4562	hci_debugfs_create_conn(conn);
4563	hci_conn_add_sysfs(conn);
4564
4565	if (!ev->status) {
4566		/* The remote features procedure is defined for the
4567		 * master role only, so only request the remote features
4568		 * for connections that we initiated.
4569		 *
4570		 * If the local controller supports slave-initiated features
4571		 * exchange, then requesting the remote features in slave
4572		 * role is possible. Otherwise just transition into the
4573		 * connected state without requesting the remote features.
4574		 */
4575		if (conn->out ||
4576		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4577			struct hci_cp_le_read_remote_features cp;
4578
4579			cp.handle = __cpu_to_le16(conn->handle);
4580
4581			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4582				     sizeof(cp), &cp);
4583
4584			hci_conn_hold(conn);
4585		} else {
4586			conn->state = BT_CONNECTED;
4587			hci_connect_cfm(conn, ev->status);
4588		}
4589	} else {
4590		hci_connect_cfm(conn, ev->status);
4591	}
4592
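	/* The connection attempt is now complete, so remove any pending
	 * auto-connect action stored for this device and drop the
	 * reference it held on the connection.
	 */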
4593	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4594					   conn->dst_type);
4595	if (params) {
4596		list_del_init(&params->action);
4597		if (params->conn) {
4598			hci_conn_drop(params->conn);
4599			hci_conn_put(params->conn);
4600			params->conn = NULL;
4601		}
4602	}
4603
4604unlock:
4605	hci_update_background_scan(hdev);
4606	hci_dev_unlock(hdev);
4607}
4608
4609static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4610					    struct sk_buff *skb)
4611{
4612	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4613	struct hci_conn *conn;
4614
4615	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4616
4617	if (ev->status)
4618		return;
4619
4620	hci_dev_lock(hdev);
4621
4622	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4623	if (conn) {
4624		conn->le_conn_interval = le16_to_cpu(ev->interval);
4625		conn->le_conn_latency = le16_to_cpu(ev->latency);
4626		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4627	}
4628
4629	hci_dev_unlock(hdev);
4630}
4631
4632/* This function requires the caller holds hdev->lock */
4633static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4634					      bdaddr_t *addr,
4635					      u8 addr_type, u8 adv_type)
4636{
4637	struct hci_conn *conn;
4638	struct hci_conn_params *params;
4639
4640	/* If the event is not connectable, don't proceed further */
4641	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4642		return NULL;
4643
4644	/* Ignore if the device is blocked */
4645	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4646		return NULL;
4647
4648	/* Most controllers will fail if we try to create new connections
4649	 * while we have an existing one in slave role.
4650	 */
4651	if (hdev->conn_hash.le_num_slave > 0)
4652		return NULL;
4653
4654	/* If we're not connectable, only connect to devices that are in
4655	 * our pend_le_conns list.
4656	 */
4657	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4658					   addr_type);
4659	if (!params)
4660		return NULL;
4661
4662	if (!params->explicit_connect) {
4663		switch (params->auto_connect) {
4664		case HCI_AUTO_CONN_DIRECT:
4665			/* Only devices advertising with ADV_DIRECT_IND
4666			 * trigger a connection attempt. This allows
4667			 * incoming connections from slave devices.
4668			 */
4669			if (adv_type != LE_ADV_DIRECT_IND)
4670				return NULL;
4671			break;
4672		case HCI_AUTO_CONN_ALWAYS:
4673			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
4674			 * trigger a connection attempt. This means that
4675			 * incoming connections from slave devices are
4676			 * accepted and also outgoing connections to slave
4677			 * devices are established when found.
4678			 */
4679			break;
4680		default:
4681			return NULL;
4682		}
4683	}
4684
4685	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4686			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4687	if (!IS_ERR(conn)) {
4688		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4689		 * by the higher layer that tried to connect; if not,
4690		 * store the pointer since we don't really have any
4691		 * other owner of the object besides the params that
4692		 * triggered it. This way we can abort the connection if
4693		 * the parameters get removed and keep the reference
4694		 * count consistent once the connection is established.
4695		 */
4696
4697		if (!params->explicit_connect)
4698			params->conn = hci_conn_get(conn);
4699
4700		return conn;
4701	}
4702
4703	switch (PTR_ERR(conn)) {
4704	case -EBUSY:
4705		/* If hci_connect() returns -EBUSY it means there is already
4706		 * an LE connection attempt going on. Since controllers don't
4707		 * support more than one connection attempt at a time, we
4708		 * don't consider this an error case.
4709		 */
4710		break;
4711	default:
4712		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4713		return NULL;
4714	}
4715
4716	return NULL;
4717}
4718
4719static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4720			       u8 bdaddr_type, bdaddr_t *direct_addr,
4721			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4722{
4723	struct discovery_state *d = &hdev->discovery;
4724	struct smp_irk *irk;
4725	struct hci_conn *conn;
4726	bool match;
4727	u32 flags;
4728	u8 *ptr, real_len;
4729
4730	/* Find the end of the data in case the report contains padded zero
4731	 * bytes at the end causing an invalid length value.
4732	 *
4733	 * When data is NULL, len is 0 so there is no need for extra ptr
4734	 * check as 'ptr < data + 0' is already false in such a case.
4735	 */
4736	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4737		if (ptr + 1 + *ptr > data + len)
4738			break;
4739	}
4740
4741	real_len = ptr - data;
4742
4743	/* Adjust for actual length */
4744	if (len != real_len) {
4745		BT_ERR_RATELIMITED("%s advertising data length corrected",
4746				   hdev->name);
4747		len = real_len;
4748	}
4749
4750	/* If the direct address is present, then this report is from
4751	 * an LE Direct Advertising Report event. In that case it is
4752	 * important to see if the address matches the local
4753	 * controller address.
4754	 */
4755	if (direct_addr) {
4756		/* Only resolvable random addresses are valid for this
4757		 * kind of report and others can be ignored.
4758		 */
4759		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4760			return;
4761
4762		/* If the controller is not using resolvable random
4763		 * addresses, then this report can be ignored.
4764		 */
4765		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4766			return;
4767
4768		/* If the local IRK of the controller does not match
4769		 * with the resolvable random address provided, then
4770		 * this report can be ignored.
4771		 */
4772		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4773			return;
4774	}
4775
4776	/* Check if we need to convert to identity address */
4777	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4778	if (irk) {
4779		bdaddr = &irk->bdaddr;
4780		bdaddr_type = irk->addr_type;
4781	}
4782
4783	/* Check if we have been requested to connect to this device */
4784	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4785	if (conn && type == LE_ADV_IND) {
4786		/* Store report for later inclusion by
4787		 * mgmt_device_connected
4788		 */
4789		memcpy(conn->le_adv_data, data, len);
4790		conn->le_adv_data_len = len;
4791	}
4792
4792
4793	/* Passive scanning shouldn't trigger any device found events,
4794	 * except for devices marked as CONN_REPORT for which we do send
4795	 * device found events.
4796	 */
4797	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4798		if (type == LE_ADV_DIRECT_IND)
4799			return;
4800
4801		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4802					       bdaddr, bdaddr_type))
4803			return;
4804
4805		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4806			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4807		else
4808			flags = 0;
4809		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4810				  rssi, flags, data, len, NULL, 0);
4811		return;
4812	}
4813
4814	/* When receiving non-connectable or scannable undirected
4815	 * advertising reports, the remote device is known to be not
4816	 * connectable, so this is clearly indicated in the
4817	 * device found event.
4818	 *
4819	 * When receiving a scan response, then there is no way to
4820	 * know if the remote device is connectable or not. However
4821	 * since scan responses are merged with a previously seen
4822	 * advertising report, the flags field from that report
4823	 * will be used.
4824	 *
4825	 * In the really unlikely case that a controller gets confused
4826	 * and just sends a scan response event, it is marked as
4827	 * not connectable as well.
4828	 */
4829	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4830	    type == LE_ADV_SCAN_RSP)
4831		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4832	else
4833		flags = 0;
4834
4835	/* If there's nothing pending either store the data from this
4836	 * event or send an immediate device found event if the data
4837	 * should not be stored for later.
4838	 */
4839	if (!has_pending_adv_report(hdev)) {
4840		/* If the report will trigger a SCAN_REQ store it for
4841		 * later merging.
4842		 */
4843		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4844			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4845						 rssi, flags, data, len);
4846			return;
4847		}
4848
4849		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4850				  rssi, flags, data, len, NULL, 0);
4851		return;
4852	}
4853
4854	/* Check if the pending report is for the same device as the new one */
4855	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4856		 bdaddr_type == d->last_adv_addr_type);
4857
4858	/* If the pending data doesn't match this report or this isn't a
4859	 * scan response (e.g. we got a duplicate ADV_IND) then force
4860	 * sending of the pending data.
4861	 */
4862	if (type != LE_ADV_SCAN_RSP || !match) {
4863		/* Send out whatever is in the cache, but skip duplicates */
4864		if (!match)
4865			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4866					  d->last_adv_addr_type, NULL,
4867					  d->last_adv_rssi, d->last_adv_flags,
4868					  d->last_adv_data,
4869					  d->last_adv_data_len, NULL, 0);
4870
4871		/* If the new report will trigger a SCAN_REQ store it for
4872		 * later merging.
4873		 */
4874		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4875			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4876						 rssi, flags, data, len);
4877			return;
4878		}
4879
4880		/* The advertising reports cannot be merged, so clear
4881		 * the pending report and send out a device found event.
4882		 */
4883		clear_pending_adv_report(hdev);
4884		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4885				  rssi, flags, data, len, NULL, 0);
4886		return;
4887	}
4888
4889	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4890	 * the new event is a SCAN_RSP. We can therefore proceed with
4891	 * sending a merged device found event.
4892	 */
4893	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4894			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4895			  d->last_adv_data, d->last_adv_data_len, data, len);
4896	clear_pending_adv_report(hdev);
4897}
4898
4899static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4900{
4901	u8 num_reports = skb->data[0];
4902	void *ptr = &skb->data[1];
4903
4904	hci_dev_lock(hdev);
4905
4906	while (num_reports--) {
4907		struct hci_ev_le_advertising_info *ev = ptr;
4908		s8 rssi;
4909
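		/* The RSSI byte immediately follows the variable-length
		 * advertising data in each report.
		 */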
4910		rssi = ev->data[ev->length];
4911		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4912				   ev->bdaddr_type, NULL, 0, rssi,
4913				   ev->data, ev->length);
4914
4915		ptr += sizeof(*ev) + ev->length + 1;
4916	}
4917
4918	hci_dev_unlock(hdev);
4919}
4920
4921static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4922					    struct sk_buff *skb)
4923{
4924	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4925	struct hci_conn *conn;
4926
4927	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4928
4929	hci_dev_lock(hdev);
4930
4931	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4932	if (conn) {
4933		if (!ev->status)
4934			memcpy(conn->features[0], ev->features, 8);
4935
4936		if (conn->state == BT_CONFIG) {
4937			__u8 status;
4938
4939			/* If the local controller supports slave-initiated
4940			 * features exchange, but the remote controller does
4941			 * not, then it is possible that the error code 0x1a
4942			 * for unsupported remote feature gets returned.
4943			 *
4944			 * In this specific case, allow the connection to
4945			 * transition into connected state and mark it as
4946			 * successful.
4947			 */
4948			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4949			    !conn->out && ev->status == 0x1a)
4950				status = 0x00;
4951			else
4952				status = ev->status;
4953
4954			conn->state = BT_CONNECTED;
4955			hci_connect_cfm(conn, status);
4956			hci_conn_drop(conn);
4957		}
4958	}
4959
4960	hci_dev_unlock(hdev);
4961}
4962
4963static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4964{
4965	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4966	struct hci_cp_le_ltk_reply cp;
4967	struct hci_cp_le_ltk_neg_reply neg;
4968	struct hci_conn *conn;
4969	struct smp_ltk *ltk;
4970
4971	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4972
4973	hci_dev_lock(hdev);
4974
4975	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4976	if (!conn)
4977		goto not_found;
4978
4979	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4980	if (!ltk)
4981		goto not_found;
4982
4983	if (smp_ltk_is_sc(ltk)) {
4984		/* With SC both EDiv and Rand are set to zero */
4985		if (ev->ediv || ev->rand)
4986			goto not_found;
4987	} else {
4988		/* For non-SC keys check that EDiv and Rand match */
4989		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
4990			goto not_found;
4991	}
4992
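	/* Copy the key and zero-pad it to the full 16 bytes, since the
	 * negotiated encryption key size may be shorter than the LTK.
	 */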
4993	memcpy(cp.ltk, ltk->val, ltk->enc_size);
4994	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
4995	cp.handle = cpu_to_le16(conn->handle);
4996
4997	conn->pending_sec_level = smp_ltk_sec_level(ltk);
4998
4999	conn->enc_key_size = ltk->enc_size;
5000
5001	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5002
5003	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5004	 * temporary key used to encrypt a connection following
5005	 * pairing. It is used during the Encrypted Session Setup to
5006	 * distribute the keys. Later, security can be re-established
5007	 * using a distributed LTK.
5008	 */
5009	if (ltk->type == SMP_STK) {
5010		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5011		list_del_rcu(&ltk->list);
5012		kfree_rcu(ltk, rcu);
5013	} else {
5014		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5015	}
5016
5017	hci_dev_unlock(hdev);
5018
5019	return;
5020
5021not_found:
5022	neg.handle = ev->handle;
5023	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5024	hci_dev_unlock(hdev);
5025}
5026
5027static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5028				      u8 reason)
5029{
5030	struct hci_cp_le_conn_param_req_neg_reply cp;
5031
5032	cp.handle = cpu_to_le16(handle);
5033	cp.reason = reason;
5034
5035	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5036		     &cp);
5037}
5038
5039static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5040					     struct sk_buff *skb)
5041{
5042	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5043	struct hci_cp_le_conn_param_req_reply cp;
5044	struct hci_conn *hcon;
5045	u16 handle, min, max, latency, timeout;
5046
5047	handle = le16_to_cpu(ev->handle);
5048	min = le16_to_cpu(ev->interval_min);
5049	max = le16_to_cpu(ev->interval_max);
5050	latency = le16_to_cpu(ev->latency);
5051	timeout = le16_to_cpu(ev->timeout);
5052
5053	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5054	if (!hcon || hcon->state != BT_CONNECTED)
5055		return send_conn_param_neg_reply(hdev, handle,
5056						 HCI_ERROR_UNKNOWN_CONN_ID);
5057
5058	if (hci_check_conn_params(min, max, latency, timeout))
5059		return send_conn_param_neg_reply(hdev, handle,
5060						 HCI_ERROR_INVALID_LL_PARAMS);
5061
5062	if (hcon->role == HCI_ROLE_MASTER) {
5063		struct hci_conn_params *params;
5064		u8 store_hint;
5065
5066		hci_dev_lock(hdev);
5067
5068		params = hci_conn_params_lookup(hdev, &hcon->dst,
5069						hcon->dst_type);
5070		if (params) {
5071			params->conn_min_interval = min;
5072			params->conn_max_interval = max;
5073			params->conn_latency = latency;
5074			params->supervision_timeout = timeout;
5075			store_hint = 0x01;
5076		} else {
5077			store_hint = 0x00;
5078		}
5079
5080		hci_dev_unlock(hdev);
5081
5082		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5083				    store_hint, min, max, latency, timeout);
5084	}
5085
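	/* Accept the request by echoing the proposed parameters back to
	 * the controller.
	 */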
5086	cp.handle = ev->handle;
5087	cp.interval_min = ev->interval_min;
5088	cp.interval_max = ev->interval_max;
5089	cp.latency = ev->latency;
5090	cp.timeout = ev->timeout;
5091	cp.min_ce_len = 0;
5092	cp.max_ce_len = 0;
5093
5094	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5095}
5096
5097static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5098					 struct sk_buff *skb)
5099{
5100	u8 num_reports = skb->data[0];
5101	void *ptr = &skb->data[1];
5102
5103	hci_dev_lock(hdev);
5104
5105	while (num_reports--) {
5106		struct hci_ev_le_direct_adv_info *ev = ptr;
5107
5108		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5109				   ev->bdaddr_type, &ev->direct_addr,
5110				   ev->direct_addr_type, ev->rssi, NULL, 0);
5111
5112		ptr += sizeof(*ev);
5113	}
5114
5115	hci_dev_unlock(hdev);
5116}
5117
5118static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5119{
5120	struct hci_ev_le_meta *le_ev = (void *) skb->data;
5121
5122	skb_pull(skb, sizeof(*le_ev));
5123
5124	switch (le_ev->subevent) {
5125	case HCI_EV_LE_CONN_COMPLETE:
5126		hci_le_conn_complete_evt(hdev, skb);
5127		break;
5128
5129	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5130		hci_le_conn_update_complete_evt(hdev, skb);
5131		break;
5132
5133	case HCI_EV_LE_ADVERTISING_REPORT:
5134		hci_le_adv_report_evt(hdev, skb);
5135		break;
5136
5137	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5138		hci_le_remote_feat_complete_evt(hdev, skb);
5139		break;
5140
5141	case HCI_EV_LE_LTK_REQ:
5142		hci_le_ltk_request_evt(hdev, skb);
5143		break;
5144
5145	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5146		hci_le_remote_conn_param_req_evt(hdev, skb);
5147		break;
5148
5149	case HCI_EV_LE_DIRECT_ADV_REPORT:
5150		hci_le_direct_adv_report_evt(hdev, skb);
5151		break;
5152
5153	default:
5154		break;
5155	}
5156}
5157
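/* Verify that the skb carries the event that completes the currently
 * pending request, pulling off the event headers so that only the
 * parameters remain for the request's complete callback.
 */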
5158static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5159				 u8 event, struct sk_buff *skb)
5160{
5161	struct hci_ev_cmd_complete *ev;
5162	struct hci_event_hdr *hdr;
5163
5164	if (!skb)
5165		return false;
5166
5167	if (skb->len < sizeof(*hdr)) {
5168		BT_ERR("Too short HCI event");
5169		return false;
5170	}
5171
5172	hdr = (void *) skb->data;
5173	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5174
5175	if (event) {
5176		if (hdr->evt != event)
5177			return false;
5178		return true;
5179	}
5180
5181	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5182		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5183		return false;
5184	}
5185
5186	if (skb->len < sizeof(*ev)) {
5187		BT_ERR("Too short cmd_complete event");
5188		return false;
5189	}
5190
5191	ev = (void *) skb->data;
5192	skb_pull(skb, sizeof(*ev));
5193
5194	if (opcode != __le16_to_cpu(ev->opcode)) {
5195		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
5196		       __le16_to_cpu(ev->opcode));
5197		return false;
5198	}
5199
5200	return true;
5201}
5202
5203void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5204{
5205	struct hci_event_hdr *hdr = (void *) skb->data;
5206	hci_req_complete_t req_complete = NULL;
5207	hci_req_complete_skb_t req_complete_skb = NULL;
5208	struct sk_buff *orig_skb = NULL;
5209	u8 status = 0, event = hdr->evt, req_evt = 0;
5210	u16 opcode = HCI_OP_NOP;
5211
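	/* If the currently sent command was flagged to complete on this
	 * particular event, look up its opcode so that the request can
	 * be completed below.
	 */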
5212	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5213		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5214		opcode = __le16_to_cpu(cmd_hdr->opcode);
5215		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5216				     &req_complete_skb);
5217		req_evt = event;
5218	}
5219
5220	/* If it looks like we might end up having to call
5221	 * req_complete_skb, store a pristine copy of the skb since the
5222	 * various handlers may modify the original one through
5223	 * skb_pull() calls, etc.
5224	 */
5225	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5226	    event == HCI_EV_CMD_COMPLETE)
5227		orig_skb = skb_clone(skb, GFP_KERNEL);
5228
5229	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5230
5231	switch (event) {
5232	case HCI_EV_INQUIRY_COMPLETE:
5233		hci_inquiry_complete_evt(hdev, skb);
5234		break;
5235
5236	case HCI_EV_INQUIRY_RESULT:
5237		hci_inquiry_result_evt(hdev, skb);
5238		break;
5239
5240	case HCI_EV_CONN_COMPLETE:
5241		hci_conn_complete_evt(hdev, skb);
5242		break;
5243
5244	case HCI_EV_CONN_REQUEST:
5245		hci_conn_request_evt(hdev, skb);
5246		break;
5247
5248	case HCI_EV_DISCONN_COMPLETE:
5249		hci_disconn_complete_evt(hdev, skb);
5250		break;
5251
5252	case HCI_EV_AUTH_COMPLETE:
5253		hci_auth_complete_evt(hdev, skb);
5254		break;
5255
5256	case HCI_EV_REMOTE_NAME:
5257		hci_remote_name_evt(hdev, skb);
5258		break;
5259
5260	case HCI_EV_ENCRYPT_CHANGE:
5261		hci_encrypt_change_evt(hdev, skb);
5262		break;
5263
5264	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5265		hci_change_link_key_complete_evt(hdev, skb);
5266		break;
5267
5268	case HCI_EV_REMOTE_FEATURES:
5269		hci_remote_features_evt(hdev, skb);
5270		break;
5271
5272	case HCI_EV_CMD_COMPLETE:
5273		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5274				     &req_complete, &req_complete_skb);
5275		break;
5276
5277	case HCI_EV_CMD_STATUS:
5278		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5279				   &req_complete_skb);
5280		break;
5281
5282	case HCI_EV_HARDWARE_ERROR:
5283		hci_hardware_error_evt(hdev, skb);
5284		break;
5285
5286	case HCI_EV_ROLE_CHANGE:
5287		hci_role_change_evt(hdev, skb);
5288		break;
5289
5290	case HCI_EV_NUM_COMP_PKTS:
5291		hci_num_comp_pkts_evt(hdev, skb);
5292		break;
5293
5294	case HCI_EV_MODE_CHANGE:
5295		hci_mode_change_evt(hdev, skb);
5296		break;
5297
5298	case HCI_EV_PIN_CODE_REQ:
5299		hci_pin_code_request_evt(hdev, skb);
5300		break;
5301
5302	case HCI_EV_LINK_KEY_REQ:
5303		hci_link_key_request_evt(hdev, skb);
5304		break;
5305
5306	case HCI_EV_LINK_KEY_NOTIFY:
5307		hci_link_key_notify_evt(hdev, skb);
5308		break;
5309
5310	case HCI_EV_CLOCK_OFFSET:
5311		hci_clock_offset_evt(hdev, skb);
5312		break;
5313
5314	case HCI_EV_PKT_TYPE_CHANGE:
5315		hci_pkt_type_change_evt(hdev, skb);
5316		break;
5317
5318	case HCI_EV_PSCAN_REP_MODE:
5319		hci_pscan_rep_mode_evt(hdev, skb);
5320		break;
5321
5322	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5323		hci_inquiry_result_with_rssi_evt(hdev, skb);
5324		break;
5325
5326	case HCI_EV_REMOTE_EXT_FEATURES:
5327		hci_remote_ext_features_evt(hdev, skb);
5328		break;
5329
5330	case HCI_EV_SYNC_CONN_COMPLETE:
5331		hci_sync_conn_complete_evt(hdev, skb);
5332		break;
5333
5334	case HCI_EV_EXTENDED_INQUIRY_RESULT:
5335		hci_extended_inquiry_result_evt(hdev, skb);
5336		break;
5337
5338	case HCI_EV_KEY_REFRESH_COMPLETE:
5339		hci_key_refresh_complete_evt(hdev, skb);
5340		break;
5341
5342	case HCI_EV_IO_CAPA_REQUEST:
5343		hci_io_capa_request_evt(hdev, skb);
5344		break;
5345
5346	case HCI_EV_IO_CAPA_REPLY:
5347		hci_io_capa_reply_evt(hdev, skb);
5348		break;
5349
5350	case HCI_EV_USER_CONFIRM_REQUEST:
5351		hci_user_confirm_request_evt(hdev, skb);
5352		break;
5353
5354	case HCI_EV_USER_PASSKEY_REQUEST:
5355		hci_user_passkey_request_evt(hdev, skb);
5356		break;
5357
5358	case HCI_EV_USER_PASSKEY_NOTIFY:
5359		hci_user_passkey_notify_evt(hdev, skb);
5360		break;
5361
5362	case HCI_EV_KEYPRESS_NOTIFY:
5363		hci_keypress_notify_evt(hdev, skb);
5364		break;
5365
5366	case HCI_EV_SIMPLE_PAIR_COMPLETE:
5367		hci_simple_pair_complete_evt(hdev, skb);
5368		break;
5369
5370	case HCI_EV_REMOTE_HOST_FEATURES:
5371		hci_remote_host_features_evt(hdev, skb);
5372		break;
5373
5374	case HCI_EV_LE_META:
5375		hci_le_meta_evt(hdev, skb);
5376		break;
5377
5378	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5379		hci_remote_oob_data_request_evt(hdev, skb);
5380		break;
5381
5382#if IS_ENABLED(CONFIG_BT_HS)
5383	case HCI_EV_CHANNEL_SELECTED:
5384		hci_chan_selected_evt(hdev, skb);
5385		break;
5386
5387	case HCI_EV_PHY_LINK_COMPLETE:
5388		hci_phy_link_complete_evt(hdev, skb);
5389		break;
5390
5391	case HCI_EV_LOGICAL_LINK_COMPLETE:
5392		hci_loglink_complete_evt(hdev, skb);
5393		break;
5394
5395	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5396		hci_disconn_loglink_complete_evt(hdev, skb);
5397		break;
5398
5399	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5400		hci_disconn_phylink_complete_evt(hdev, skb);
5401		break;
5402#endif
5403
5404	case HCI_EV_NUM_COMP_BLOCKS:
5405		hci_num_comp_blocks_evt(hdev, skb);
5406		break;
5407
5408	default:
5409		BT_DBG("%s event 0x%2.2x", hdev->name, event);
5410		break;
5411	}
5412
5413	if (req_complete) {
5414		req_complete(hdev, status, opcode);
5415	} else if (req_complete_skb) {
5416		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5417			kfree_skb(orig_skb);
5418			orig_skb = NULL;
5419		}
5420		req_complete_skb(hdev, status, opcode, orig_skb);
5421	}
5422
5423	kfree_skb(orig_skb);
5424	kfree_skb(skb);
5425	hdev->stat.evt_rx++;
5426}
v6.13.7
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   4   Copyright 2023-2024 NXP
   5
   6   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   7
   8   This program is free software; you can redistribute it and/or modify
   9   it under the terms of the GNU General Public License version 2 as
  10   published by the Free Software Foundation;
  11
  12   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  13   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  15   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  16   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  17   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  18   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  19   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  20
  21   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  22   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  23   SOFTWARE IS DISCLAIMED.
  24*/
  25
  26/* Bluetooth HCI event handling. */
  27
  28#include <linux/unaligned.h>
  29#include <linux/crypto.h>
  30#include <crypto/algapi.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "hci_debugfs.h"
  37#include "hci_codec.h"
  38#include "smp.h"
  39#include "msft.h"
  40#include "eir.h"
  41
  42#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
  43		 "\x00\x00\x00\x00\x00\x00\x00\x00"
  44
  45/* Handle HCI Event packets */
  46
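/* Helpers that pull a fixed-size structure from an event skb and log an
 * error when the packet turns out to be too short (i.e. malformed).
 */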
  47static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
  48			     u8 ev, size_t len)
  49{
  50	void *data;
  51
  52	data = skb_pull_data(skb, len);
  53	if (!data)
  54		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
  55
  56	return data;
  57}
  58
  59static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
  60			     u16 op, size_t len)
  61{
  62	void *data;
  63
  64	data = skb_pull_data(skb, len);
  65	if (!data)
  66		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
  67
  68	return data;
  69}
  70
  71static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
  72				u8 ev, size_t len)
  73{
  74	void *data;
  75
  76	data = skb_pull_data(skb, len);
  77	if (!data)
  78		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
  79
  80	return data;
  81}
  82
  83static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
  84				struct sk_buff *skb)
  85{
  86	struct hci_ev_status *rp = data;
  87
  88	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
  89
  90	/* It is possible that we receive an Inquiry Complete event right
  91	 * before we receive the Inquiry Cancel Command Complete event, in
  92	 * which case the latter event should have a status of Command
  93	 * Disallowed. This should not be treated as an error, since
  94	 * we actually achieve what Inquiry Cancel wants to achieve,
  95	 * which is to end the last Inquiry session.
  96	 */
  97	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
  98		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
  99		rp->status = 0x00;
 100	}
 101
 102	if (rp->status)
 103		return rp->status;
 104
 105	clear_bit(HCI_INQUIRY, &hdev->flags);
 106	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 107	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 108
 109	hci_dev_lock(hdev);
 110	/* Set discovery state to stopped if we're not doing LE active
 111	 * scanning.
 112	 */
 113	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
 114	    hdev->le_scan_type != LE_SCAN_ACTIVE)
 115		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 116	hci_dev_unlock(hdev);
 117
 118	return rp->status;
 119}
 120
 121static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
 122			      struct sk_buff *skb)
 123{
 124	struct hci_ev_status *rp = data;
 125
 126	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 127
 128	if (rp->status)
 129		return rp->status;
 130
 131	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
 132
 133	return rp->status;
 134}
 135
 136static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
 137				   struct sk_buff *skb)
 138{
 139	struct hci_ev_status *rp = data;
 140
 141	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 142
 143	if (rp->status)
 144		return rp->status;
 145
 146	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
 147
 148	return rp->status;
 149}
 150
 151static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
 152					struct sk_buff *skb)
 153{
 154	struct hci_ev_status *rp = data;
 155
 156	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 157
 158	return rp->status;
 159}
 160
 161static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
 162				struct sk_buff *skb)
 163{
 164	struct hci_rp_role_discovery *rp = data;
 165	struct hci_conn *conn;
 166
 167	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 168
 169	if (rp->status)
 170		return rp->status;
 171
 172	hci_dev_lock(hdev);
 173
 174	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 175	if (conn)
 176		conn->role = rp->role;
 177
 178	hci_dev_unlock(hdev);
 179
 180	return rp->status;
 181}
 182
 183static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
 184				  struct sk_buff *skb)
 185{
 186	struct hci_rp_read_link_policy *rp = data;
 187	struct hci_conn *conn;
 188
 189	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 190
 191	if (rp->status)
 192		return rp->status;
 193
 194	hci_dev_lock(hdev);
 195
 196	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 197	if (conn)
 198		conn->link_policy = __le16_to_cpu(rp->policy);
 199
 200	hci_dev_unlock(hdev);
 201
 202	return rp->status;
 203}
 204
 205static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
 206				   struct sk_buff *skb)
 207{
 208	struct hci_rp_write_link_policy *rp = data;
 209	struct hci_conn *conn;
 210	void *sent;
 211
 212	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 213
 214	if (rp->status)
 215		return rp->status;
 216
 217	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
 218	if (!sent)
 219		return rp->status;
 220
 221	hci_dev_lock(hdev);
 222
 223	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 224	if (conn)
 225		conn->link_policy = get_unaligned_le16(sent + 2);
 226
 227	hci_dev_unlock(hdev);
 228
 229	return rp->status;
 230}
 231
 232static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
 233				      struct sk_buff *skb)
 234{
 235	struct hci_rp_read_def_link_policy *rp = data;
 236
 237	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 238
 239	if (rp->status)
 240		return rp->status;
 241
 242	hdev->link_policy = __le16_to_cpu(rp->policy);
 243
 244	return rp->status;
 245}
 246
 247static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
 248				       struct sk_buff *skb)
 249{
 250	struct hci_ev_status *rp = data;
 251	void *sent;
 252
 253	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 254
 255	if (rp->status)
 256		return rp->status;
 257
 258	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
 259	if (!sent)
 260		return rp->status;
 261
 262	hdev->link_policy = get_unaligned_le16(sent);
 263
 264	return rp->status;
 265}
 266
 267static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
 268{
 269	struct hci_ev_status *rp = data;
 270
 271	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 272
 273	clear_bit(HCI_RESET, &hdev->flags);
 274
 275	if (rp->status)
 276		return rp->status;
 277
 278	/* Reset all non-persistent flags */
 279	hci_dev_clear_volatile_flags(hdev);
 280
 281	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 282
 283	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
 284	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 285
 286	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
 287	hdev->adv_data_len = 0;
 288
 289	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
 290	hdev->scan_rsp_data_len = 0;
 291
 292	hdev->le_scan_type = LE_SCAN_PASSIVE;
 293
 294	hdev->ssp_debug_mode = 0;
 295
 296	hci_bdaddr_list_clear(&hdev->le_accept_list);
 297	hci_bdaddr_list_clear(&hdev->le_resolv_list);
 298
 299	return rp->status;
 300}
 301
 302static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
 303				      struct sk_buff *skb)
 304{
 305	struct hci_rp_read_stored_link_key *rp = data;
 306	struct hci_cp_read_stored_link_key *sent;
 307
 308	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 309
 310	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
 311	if (!sent)
 312		return rp->status;
 313
 314	if (!rp->status && sent->read_all == 0x01) {
 315		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
 316		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
 317	}
 318
 319	return rp->status;
 320}
 321
 322static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
 323					struct sk_buff *skb)
 324{
 325	struct hci_rp_delete_stored_link_key *rp = data;
 326	u16 num_keys;
 327
 328	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 329
 330	if (rp->status)
 331		return rp->status;
 332
 333	num_keys = le16_to_cpu(rp->num_keys);
 334
 335	if (num_keys <= hdev->stored_num_keys)
 336		hdev->stored_num_keys -= num_keys;
 337	else
 338		hdev->stored_num_keys = 0;
 339
 340	return rp->status;
 341}
 342
 343static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
 344				  struct sk_buff *skb)
 345{
 346	struct hci_ev_status *rp = data;
 347	void *sent;
 348
 349	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 350
 351	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
 352	if (!sent)
 353		return rp->status;
 354
 355	hci_dev_lock(hdev);
 356
 357	if (hci_dev_test_flag(hdev, HCI_MGMT))
 358		mgmt_set_local_name_complete(hdev, sent, rp->status);
 359	else if (!rp->status)
 360		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 361
 362	hci_dev_unlock(hdev);
 363
 364	return rp->status;
 365}
 366
 367static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
 368				 struct sk_buff *skb)
 369{
 370	struct hci_rp_read_local_name *rp = data;
 371
 372	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 373
 374	if (rp->status)
 375		return rp->status;
 376
 377	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 378	    hci_dev_test_flag(hdev, HCI_CONFIG))
 379		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 380
 381	return rp->status;
 382}
 383
 384static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
 385				   struct sk_buff *skb)
 386{
 387	struct hci_ev_status *rp = data;
 388	void *sent;
 389
 390	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 391
 392	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
 393	if (!sent)
 394		return rp->status;
 395
 396	hci_dev_lock(hdev);
 397
 398	if (!rp->status) {
 399		__u8 param = *((__u8 *) sent);
 400
 401		if (param == AUTH_ENABLED)
 402			set_bit(HCI_AUTH, &hdev->flags);
 403		else
 404			clear_bit(HCI_AUTH, &hdev->flags);
 405	}
 406
 407	if (hci_dev_test_flag(hdev, HCI_MGMT))
 408		mgmt_auth_enable_complete(hdev, rp->status);
 409
 410	hci_dev_unlock(hdev);
 411
 412	return rp->status;
 413}
 414
 415static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
 416				    struct sk_buff *skb)
 417{
 418	struct hci_ev_status *rp = data;
 419	__u8 param;
 420	void *sent;
 421
 422	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 423
 424	if (rp->status)
 425		return rp->status;
 426
 427	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
 428	if (!sent)
 429		return rp->status;
 430
 431	param = *((__u8 *) sent);
 432
 433	if (param)
 434		set_bit(HCI_ENCRYPT, &hdev->flags);
 435	else
 436		clear_bit(HCI_ENCRYPT, &hdev->flags);
 437
 438	return rp->status;
 439}
 440
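/* Command complete handler for HCI_OP_WRITE_SCAN_ENABLE: mirror the written
 * scan mode into the HCI_ISCAN/HCI_PSCAN flags; on failure the discoverable
 * timeout is cleared.
 */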
 441static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
 442				   struct sk_buff *skb)
 443{
 444	struct hci_ev_status *rp = data;
 445	__u8 param;
 446	void *sent;
 447
 448	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 449
 450	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 451	if (!sent)
 452		return rp->status;
 453
 454	param = *((__u8 *) sent);
 455
 456	hci_dev_lock(hdev);
 457
 458	if (rp->status) {
 459		hdev->discov_timeout = 0;
 460		goto done;
 461	}
 462
 463	if (param & SCAN_INQUIRY)
 464		set_bit(HCI_ISCAN, &hdev->flags);
 465	else
 466		clear_bit(HCI_ISCAN, &hdev->flags);
 467
 468	if (param & SCAN_PAGE)
 469		set_bit(HCI_PSCAN, &hdev->flags);
 470	else
 471		clear_bit(HCI_PSCAN, &hdev->flags);
 472
 473done:
 474	hci_dev_unlock(hdev);
 475
 476	return rp->status;
 477}
 478
 479static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
 480				  struct sk_buff *skb)
 481{
 482	struct hci_ev_status *rp = data;
 483	struct hci_cp_set_event_filter *cp;
 484	void *sent;
 485
 486	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 487
 488	if (rp->status)
 489		return rp->status;
 490
 491	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
 492	if (!sent)
 493		return rp->status;
 494
 495	cp = (struct hci_cp_set_event_filter *)sent;
 496
 497	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
 498		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
 499	else
 500		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
 501
 502	return rp->status;
 503}
 504
 505static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
 506				   struct sk_buff *skb)
 507{
 508	struct hci_rp_read_class_of_dev *rp = data;
 509
 510	if (WARN_ON(!hdev))
 511		return HCI_ERROR_UNSPECIFIED;
 512
 513	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 514
 515	if (rp->status)
 516		return rp->status;
 517
 518	memcpy(hdev->dev_class, rp->dev_class, 3);
 519
 520	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
 521		   hdev->dev_class[1], hdev->dev_class[0]);
 522
 523	return rp->status;
 524}
 525
 526static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
 527				    struct sk_buff *skb)
 528{
 529	struct hci_ev_status *rp = data;
 530	void *sent;
 531
 532	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 533
 534	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
 535	if (!sent)
 536		return rp->status;
 537
 538	hci_dev_lock(hdev);
 539
 540	if (!rp->status)
 541		memcpy(hdev->dev_class, sent, 3);
 542
 543	if (hci_dev_test_flag(hdev, HCI_MGMT))
 544		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
 545
 546	hci_dev_unlock(hdev);
 547
 548	return rp->status;
 549}
 550
 551static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
 552				    struct sk_buff *skb)
 553{
 554	struct hci_rp_read_voice_setting *rp = data;
 555	__u16 setting;
 556
 557	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 558
 559	if (rp->status)
 560		return rp->status;
 561
 562	setting = __le16_to_cpu(rp->voice_setting);
 563
 564	if (hdev->voice_setting == setting)
 565		return rp->status;
 566
 567	hdev->voice_setting = setting;
 568
 569	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
 570
 571	if (hdev->notify)
 572		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 573
 574	return rp->status;
 575}
 576
 577static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
 578				     struct sk_buff *skb)
 579{
 580	struct hci_ev_status *rp = data;
 581	__u16 setting;
 582	void *sent;
 583
 584	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 585
 586	if (rp->status)
 587		return rp->status;
 588
 589	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
 590	if (!sent)
 591		return rp->status;
 592
 593	setting = get_unaligned_le16(sent);
 594
 595	if (hdev->voice_setting == setting)
 596		return rp->status;
 597
 598	hdev->voice_setting = setting;
 599
 600	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
 601
 602	if (hdev->notify)
 603		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 604
 605	return rp->status;
 606}
 607
 608static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
 609					struct sk_buff *skb)
 610{
 611	struct hci_rp_read_num_supported_iac *rp = data;
 612
 613	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 614
 615	if (rp->status)
 616		return rp->status;
 617
 618	hdev->num_iac = rp->num_iac;
 619
 620	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
 621
 622	return rp->status;
 623}
 624
 625static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
 626				struct sk_buff *skb)
 627{
 628	struct hci_ev_status *rp = data;
 629	struct hci_cp_write_ssp_mode *sent;
 630
 631	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 632
 633	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
 634	if (!sent)
 635		return rp->status;
 636
 637	hci_dev_lock(hdev);
 638
 639	if (!rp->status) {
 640		if (sent->mode)
 641			hdev->features[1][0] |= LMP_HOST_SSP;
 642		else
 643			hdev->features[1][0] &= ~LMP_HOST_SSP;
 644	}
 645
 646	if (!rp->status) {
 647		if (sent->mode)
 648			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
 649		else
 650			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
 651	}
 652
 653	hci_dev_unlock(hdev);
 654
 655	return rp->status;
 656}
 657
 658static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
 659				  struct sk_buff *skb)
 660{
 661	struct hci_ev_status *rp = data;
 662	struct hci_cp_write_sc_support *sent;
 663
 664	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 665
 666	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
 667	if (!sent)
 668		return rp->status;
 669
 670	hci_dev_lock(hdev);
 671
 672	if (!rp->status) {
 673		if (sent->support)
 674			hdev->features[1][0] |= LMP_HOST_SC;
 675		else
 676			hdev->features[1][0] &= ~LMP_HOST_SC;
 677	}
 678
 679	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
 680		if (sent->support)
 681			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
 682		else
 683			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
 684	}
 685
 686	hci_dev_unlock(hdev);
 687
 688	return rp->status;
 689}
 690
 691static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
 692				    struct sk_buff *skb)
 693{
 694	struct hci_rp_read_local_version *rp = data;
 695
 696	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 697
 698	if (rp->status)
 699		return rp->status;
 700
 701	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 702	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 703		hdev->hci_ver = rp->hci_ver;
 704		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
 705		hdev->lmp_ver = rp->lmp_ver;
 706		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
 707		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 708	}
 709
 710	return rp->status;
 711}
 712
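/* Command complete handler for HCI_OP_READ_ENC_KEY_SIZE. If the command
 * failed, the key size is assumed to be 0; if the reported size is below
 * min_enc_key_size, the encryption bits are cleared and AUTH_FAILURE is
 * passed to hci_encrypt_cfm() so the connection gets torn down.
 */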
 713static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
 714				   struct sk_buff *skb)
 715{
 716	struct hci_rp_read_enc_key_size *rp = data;
 717	struct hci_conn *conn;
 718	u16 handle;
 719	u8 status = rp->status;
 720
 721	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 722
 723	handle = le16_to_cpu(rp->handle);
 724
 725	hci_dev_lock(hdev);
 726
 727	conn = hci_conn_hash_lookup_handle(hdev, handle);
 728	if (!conn) {
 729		status = 0xff;
 730		goto done;
 731	}
 732
 733	/* While unexpected, the read_enc_key_size command may fail. The most
 734	 * secure approach is to then assume the key size is 0 to force a
 735	 * disconnection.
 736	 */
 737	if (status) {
 738		bt_dev_err(hdev, "failed to read key size for handle %u",
 739			   handle);
 740		conn->enc_key_size = 0;
 741	} else {
 742		conn->enc_key_size = rp->key_size;
 743		status = 0;
 744
 745		if (conn->enc_key_size < hdev->min_enc_key_size) {
 746			/* In the slave role, conn->state has already been set
 747			 * to BT_CONNECTED, but the L2CAP connection request
 748			 * might not have been received yet; at this point the
 749			 * L2CAP layer does almost nothing with the non-zero
 750			 * status. So also clear the encryption-related bits,
 751			 * and the handler of the L2CAP connection request will
 752			 * then see the right security state later on.
 753			 */
 754			status = HCI_ERROR_AUTH_FAILURE;
 755			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
 756			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
 757		}
 758	}
 759
 760	hci_encrypt_cfm(conn, status);
 761
 762done:
 763	hci_dev_unlock(hdev);
 764
 765	return status;
 766}
 767
 768static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
 769				     struct sk_buff *skb)
 770{
 771	struct hci_rp_read_local_commands *rp = data;
 772
 773	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 774
 775	if (rp->status)
 776		return rp->status;
 777
 778	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 779	    hci_dev_test_flag(hdev, HCI_CONFIG))
 780		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 781
 782	return rp->status;
 783}
 784
 785static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
 786					   struct sk_buff *skb)
 787{
 788	struct hci_rp_read_auth_payload_to *rp = data;
 789	struct hci_conn *conn;
 790
 791	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 792
 793	if (rp->status)
 794		return rp->status;
 795
 796	hci_dev_lock(hdev);
 797
 798	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 799	if (conn)
 800		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
 801
 802	hci_dev_unlock(hdev);
 803
 804	return rp->status;
 805}
 806
 807static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
 808					    struct sk_buff *skb)
 809{
 810	struct hci_rp_write_auth_payload_to *rp = data;
 811	struct hci_conn *conn;
 812	void *sent;
 813
 814	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 815
 816	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
 817	if (!sent)
 818		return rp->status;
 819
 820	hci_dev_lock(hdev);
 821
 822	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 823	if (!conn) {
 824		rp->status = 0xff;
 825		goto unlock;
 826	}
 827
 828	if (!rp->status)
 829		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
 830
 831unlock:
 832	hci_dev_unlock(hdev);
 833
 834	return rp->status;
 835}
 836
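/* Cache LMP feature page 0 and widen the usable ACL packet types and
 * (e)SCO air modes according to the multi-slot, HV and EV packet bits the
 * controller advertises.
 */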
 837static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
 838				     struct sk_buff *skb)
 839{
 840	struct hci_rp_read_local_features *rp = data;
 841
 842	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 843
 844	if (rp->status)
 845		return rp->status;
 846
 847	memcpy(hdev->features, rp->features, 8);
 848
 849	/* Adjust default settings according to the features
 850	 * supported by the device. */
 851
 852	if (hdev->features[0][0] & LMP_3SLOT)
 853		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
 854
 855	if (hdev->features[0][0] & LMP_5SLOT)
 856		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
 857
 858	if (hdev->features[0][1] & LMP_HV2) {
 859		hdev->pkt_type  |= (HCI_HV2);
 860		hdev->esco_type |= (ESCO_HV2);
 861	}
 862
 863	if (hdev->features[0][1] & LMP_HV3) {
 864		hdev->pkt_type  |= (HCI_HV3);
 865		hdev->esco_type |= (ESCO_HV3);
 866	}
 867
 868	if (lmp_esco_capable(hdev))
 869		hdev->esco_type |= (ESCO_EV3);
 870
 871	if (hdev->features[0][4] & LMP_EV4)
 872		hdev->esco_type |= (ESCO_EV4);
 873
 874	if (hdev->features[0][4] & LMP_EV5)
 875		hdev->esco_type |= (ESCO_EV5);
 876
 877	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
 878		hdev->esco_type |= (ESCO_2EV3);
 879
 880	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
 881		hdev->esco_type |= (ESCO_3EV3);
 882
 883	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
 884		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
 885
 886	return rp->status;
 887}
 888
 889static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
 890					 struct sk_buff *skb)
 891{
 892	struct hci_rp_read_local_ext_features *rp = data;
 893
 894	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 895
 896	if (rp->status)
 897		return rp->status;
 898
 899	if (hdev->max_page < rp->max_page) {
 900		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
 901			     &hdev->quirks))
 902			bt_dev_warn(hdev, "broken local ext features page 2");
 903		else
 904			hdev->max_page = rp->max_page;
 905	}
 906
 907	if (rp->page < HCI_MAX_PAGES)
 908		memcpy(hdev->features[rp->page], rp->features, 8);
 909
 910	return rp->status;
 911}
 912
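/* Cache the controller's ACL and SCO buffer geometry and seed the available
 * packet counters from it. HCI_QUIRK_FIXUP_BUFFER_SIZE overrides the SCO
 * values for controllers that misreport them.
 */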
 913static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
 914				  struct sk_buff *skb)
 915{
 916	struct hci_rp_read_buffer_size *rp = data;
 917
 918	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 919
 920	if (rp->status)
 921		return rp->status;
 922
 923	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
 924	hdev->sco_mtu  = rp->sco_mtu;
 925	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
 926	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
 927
 928	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
 929		hdev->sco_mtu  = 64;
 930		hdev->sco_pkts = 8;
 931	}
 932
 933	hdev->acl_cnt = hdev->acl_pkts;
 934	hdev->sco_cnt = hdev->sco_pkts;
 935
 936	bt_dev_dbg(hdev, "acl mtu %d:%d sco mtu %d:%d", hdev->acl_mtu,
 937		   hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
 938
 939	if (!hdev->acl_mtu || !hdev->acl_pkts)
 940		return HCI_ERROR_INVALID_PARAMETERS;
 941
 942	return rp->status;
 943}
 944
 945static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
 946			      struct sk_buff *skb)
 947{
 948	struct hci_rp_read_bd_addr *rp = data;
 949
 950	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 951
 952	if (rp->status)
 953		return rp->status;
 954
 955	if (test_bit(HCI_INIT, &hdev->flags))
 956		bacpy(&hdev->bdaddr, &rp->bdaddr);
 957
 958	if (hci_dev_test_flag(hdev, HCI_SETUP))
 959		bacpy(&hdev->setup_addr, &rp->bdaddr);
 960
 961	return rp->status;
 962}
 963
 964static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
 965					 struct sk_buff *skb)
 966{
 967	struct hci_rp_read_local_pairing_opts *rp = data;
 968
 969	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 970
 971	if (rp->status)
 972		return rp->status;
 973
 974	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 975	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 976		hdev->pairing_opts = rp->pairing_opts;
 977		hdev->max_enc_key_size = rp->max_key_size;
 978	}
 979
 980	return rp->status;
 981}
 982
 983static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
 984					 struct sk_buff *skb)
 985{
 986	struct hci_rp_read_page_scan_activity *rp = data;
 987
 988	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 989
 990	if (rp->status)
 991		return rp->status;
 992
 993	if (test_bit(HCI_INIT, &hdev->flags)) {
 994		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
 995		hdev->page_scan_window = __le16_to_cpu(rp->window);
 996	}
 997
 998	return rp->status;
 999}
1000
1001static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1002					  struct sk_buff *skb)
1003{
1004	struct hci_ev_status *rp = data;
1005	struct hci_cp_write_page_scan_activity *sent;
1006
1007	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1008
1009	if (rp->status)
1010		return rp->status;
1011
1012	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1013	if (!sent)
1014		return rp->status;
1015
1016	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1017	hdev->page_scan_window = __le16_to_cpu(sent->window);
1018
1019	return rp->status;
1020}
1021
1022static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1023				     struct sk_buff *skb)
1024{
1025	struct hci_rp_read_page_scan_type *rp = data;
1026
1027	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1028
1029	if (rp->status)
1030		return rp->status;
1031
1032	if (test_bit(HCI_INIT, &hdev->flags))
1033		hdev->page_scan_type = rp->type;
1034
1035	return rp->status;
1036}
1037
1038static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1039				      struct sk_buff *skb)
1040{
1041	struct hci_ev_status *rp = data;
1042	u8 *type;
1043
1044	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1045
1046	if (rp->status)
1047		return rp->status;
1048
1049	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1050	if (type)
1051		hdev->page_scan_type = *type;
1052
1053	return rp->status;
1054}
1055
1056static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1057			    struct sk_buff *skb)
1058{
1059	struct hci_rp_read_clock *rp = data;
1060	struct hci_cp_read_clock *cp;
1061	struct hci_conn *conn;
1062
1063	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1064
1065	if (rp->status)
1066		return rp->status;
1067
1068	hci_dev_lock(hdev);
1069
1070	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1071	if (!cp)
1072		goto unlock;
1073
1074	if (cp->which == 0x00) {
1075		hdev->clock = le32_to_cpu(rp->clock);
1076		goto unlock;
1077	}
1078
1079	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1080	if (conn) {
1081		conn->clock = le32_to_cpu(rp->clock);
1082		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1083	}
1084
1085unlock:
1086	hci_dev_unlock(hdev);
1087	return rp->status;
1088}
1089
1090static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1091				       struct sk_buff *skb)
1092{
1093	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1094
1095	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1096
1097	if (rp->status)
1098		return rp->status;
1099
1100	hdev->inq_tx_power = rp->tx_power;
1101
1102	return rp->status;
1103}
1104
1105static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1106					     struct sk_buff *skb)
1107{
1108	struct hci_rp_read_def_err_data_reporting *rp = data;
1109
1110	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1111
1112	if (rp->status)
1113		return rp->status;
1114
1115	hdev->err_data_reporting = rp->err_data_reporting;
1116
1117	return rp->status;
1118}
1119
1120static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1121					      struct sk_buff *skb)
1122{
1123	struct hci_ev_status *rp = data;
1124	struct hci_cp_write_def_err_data_reporting *cp;
1125
1126	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1127
1128	if (rp->status)
1129		return rp->status;
1130
1131	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1132	if (!cp)
1133		return rp->status;
1134
1135	hdev->err_data_reporting = cp->err_data_reporting;
1136
1137	return rp->status;
1138}
1139
1140static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1141				struct sk_buff *skb)
1142{
1143	struct hci_rp_pin_code_reply *rp = data;
1144	struct hci_cp_pin_code_reply *cp;
1145	struct hci_conn *conn;
1146
1147	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1148
1149	hci_dev_lock(hdev);
1150
1151	if (hci_dev_test_flag(hdev, HCI_MGMT))
1152		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1153
1154	if (rp->status)
1155		goto unlock;
1156
1157	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1158	if (!cp)
1159		goto unlock;
1160
1161	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1162	if (conn)
1163		conn->pin_length = cp->pin_len;
1164
1165unlock:
1166	hci_dev_unlock(hdev);
1167	return rp->status;
1168}
1169
1170static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1171				    struct sk_buff *skb)
1172{
1173	struct hci_rp_pin_code_neg_reply *rp = data;
1174
1175	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1176
1177	hci_dev_lock(hdev);
1178
1179	if (hci_dev_test_flag(hdev, HCI_MGMT))
1180		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1181						 rp->status);
1182
1183	hci_dev_unlock(hdev);
1184
1185	return rp->status;
1186}
1187
1188static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1189				     struct sk_buff *skb)
1190{
1191	struct hci_rp_le_read_buffer_size *rp = data;
1192
1193	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1194
1195	if (rp->status)
1196		return rp->status;
1197
1198	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1199	hdev->le_pkts = rp->le_max_pkt;
1200
1201	hdev->le_cnt = hdev->le_pkts;
1202
 1203	bt_dev_dbg(hdev, "le mtu %d:%d", hdev->le_mtu, hdev->le_pkts);
1204
1205	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
1206		return HCI_ERROR_INVALID_PARAMETERS;
1207
1208	return rp->status;
1209}
1210
1211static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1212					struct sk_buff *skb)
1213{
1214	struct hci_rp_le_read_local_features *rp = data;
1215
 1216	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1217
1218	if (rp->status)
1219		return rp->status;
1220
1221	memcpy(hdev->le_features, rp->features, 8);
1222
1223	return rp->status;
1224}
1225
1226static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1227				      struct sk_buff *skb)
1228{
1229	struct hci_rp_le_read_adv_tx_power *rp = data;
1230
1231	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1232
1233	if (rp->status)
1234		return rp->status;
1235
1236	hdev->adv_tx_power = rp->tx_power;
1237
1238	return rp->status;
1239}
1240
1241static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1242				    struct sk_buff *skb)
1243{
1244	struct hci_rp_user_confirm_reply *rp = data;
1245
1246	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1247
1248	hci_dev_lock(hdev);
1249
1250	if (hci_dev_test_flag(hdev, HCI_MGMT))
1251		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1252						 rp->status);
1253
1254	hci_dev_unlock(hdev);
1255
1256	return rp->status;
1257}
1258
1259static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1260					struct sk_buff *skb)
1261{
1262	struct hci_rp_user_confirm_reply *rp = data;
1263
1264	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1265
1266	hci_dev_lock(hdev);
1267
1268	if (hci_dev_test_flag(hdev, HCI_MGMT))
1269		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1270						     ACL_LINK, 0, rp->status);
1271
1272	hci_dev_unlock(hdev);
1273
1274	return rp->status;
1275}
1276
1277static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1278				    struct sk_buff *skb)
1279{
1280	struct hci_rp_user_confirm_reply *rp = data;
1281
1282	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1283
1284	hci_dev_lock(hdev);
1285
1286	if (hci_dev_test_flag(hdev, HCI_MGMT))
1287		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1288						 0, rp->status);
1289
1290	hci_dev_unlock(hdev);
1291
1292	return rp->status;
1293}
1294
1295static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1296					struct sk_buff *skb)
1297{
1298	struct hci_rp_user_confirm_reply *rp = data;
1299
1300	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1301
1302	hci_dev_lock(hdev);
1303
1304	if (hci_dev_test_flag(hdev, HCI_MGMT))
1305		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1306						     ACL_LINK, 0, rp->status);
1307
1308	hci_dev_unlock(hdev);
1309
1310	return rp->status;
1311}
1312
1313static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1314				     struct sk_buff *skb)
1315{
1316	struct hci_rp_read_local_oob_data *rp = data;
1317
1318	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1319
1320	return rp->status;
1321}
1322
1323static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1324					 struct sk_buff *skb)
1325{
1326	struct hci_rp_read_local_oob_ext_data *rp = data;
1327
1328	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1329
1330	return rp->status;
1331}
1332
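/* Track the random address programmed into the controller. If it matches
 * the current RPA, re-arm the rpa_expired delayed work so a fresh address
 * is generated once the timeout elapses.
 */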
1333static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1334				    struct sk_buff *skb)
1335{
1336	struct hci_ev_status *rp = data;
1337	bdaddr_t *sent;
1338
1339	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1340
1341	if (rp->status)
1342		return rp->status;
1343
1344	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1345	if (!sent)
1346		return rp->status;
1347
1348	hci_dev_lock(hdev);
1349
1350	bacpy(&hdev->random_addr, sent);
1351
1352	if (!bacmp(&hdev->rpa, sent)) {
1353		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1354		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1355				   secs_to_jiffies(hdev->rpa_timeout));
1356	}
1357
1358	hci_dev_unlock(hdev);
1359
1360	return rp->status;
1361}
1362
1363static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1364				    struct sk_buff *skb)
1365{
1366	struct hci_ev_status *rp = data;
1367	struct hci_cp_le_set_default_phy *cp;
1368
1369	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1370
1371	if (rp->status)
1372		return rp->status;
1373
1374	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1375	if (!cp)
1376		return rp->status;
1377
1378	hci_dev_lock(hdev);
1379
1380	hdev->le_tx_def_phys = cp->tx_phys;
1381	hdev->le_rx_def_phys = cp->rx_phys;
1382
1383	hci_dev_unlock(hdev);
1384
1385	return rp->status;
1386}
1387
1388static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1389					    struct sk_buff *skb)
1390{
1391	struct hci_ev_status *rp = data;
1392	struct hci_cp_le_set_adv_set_rand_addr *cp;
1393	struct adv_info *adv;
1394
1395	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1396
1397	if (rp->status)
1398		return rp->status;
1399
1400	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
 1401	/* Update only for a non-default adv instance, since handle 0x00 shall
 1402	 * use HCI_OP_LE_SET_RANDOM_ADDR, which covers both extended and
 1403	 * non-extended advertising.
 1404	 */
1405	if (!cp || !cp->handle)
1406		return rp->status;
1407
1408	hci_dev_lock(hdev);
1409
1410	adv = hci_find_adv_instance(hdev, cp->handle);
1411	if (adv) {
1412		bacpy(&adv->random_addr, &cp->bdaddr);
1413		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1414			adv->rpa_expired = false;
1415			queue_delayed_work(hdev->workqueue,
1416					   &adv->rpa_expired_cb,
1417					   secs_to_jiffies(hdev->rpa_timeout));
1418		}
1419	}
1420
1421	hci_dev_unlock(hdev);
1422
1423	return rp->status;
1424}
1425
1426static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1427				   struct sk_buff *skb)
1428{
1429	struct hci_ev_status *rp = data;
1430	u8 *instance;
1431	int err;
1432
1433	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1434
1435	if (rp->status)
1436		return rp->status;
1437
1438	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1439	if (!instance)
1440		return rp->status;
1441
1442	hci_dev_lock(hdev);
1443
1444	err = hci_remove_adv_instance(hdev, *instance);
1445	if (!err)
1446		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1447					 *instance);
1448
1449	hci_dev_unlock(hdev);
1450
1451	return rp->status;
1452}
1453
1454static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1455				   struct sk_buff *skb)
1456{
1457	struct hci_ev_status *rp = data;
1458	struct adv_info *adv, *n;
1459	int err;
1460
1461	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1462
1463	if (rp->status)
1464		return rp->status;
1465
1466	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1467		return rp->status;
1468
1469	hci_dev_lock(hdev);
1470
1471	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1472		u8 instance = adv->instance;
1473
1474		err = hci_remove_adv_instance(hdev, instance);
1475		if (!err)
1476			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1477						 hdev, instance);
1478	}
1479
1480	hci_dev_unlock(hdev);
1481
1482	return rp->status;
1483}
1484
1485static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1486					struct sk_buff *skb)
1487{
1488	struct hci_rp_le_read_transmit_power *rp = data;
1489
1490	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1491
1492	if (rp->status)
1493		return rp->status;
1494
1495	hdev->min_le_tx_power = rp->min_le_tx_power;
1496	hdev->max_le_tx_power = rp->max_le_tx_power;
1497
1498	return rp->status;
1499}
1500
1501static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1502				     struct sk_buff *skb)
1503{
1504	struct hci_ev_status *rp = data;
1505	struct hci_cp_le_set_privacy_mode *cp;
1506	struct hci_conn_params *params;
1507
1508	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1509
1510	if (rp->status)
1511		return rp->status;
1512
1513	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1514	if (!cp)
1515		return rp->status;
1516
1517	hci_dev_lock(hdev);
1518
1519	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1520	if (params)
1521		WRITE_ONCE(params->privacy_mode, cp->mode);
1522
1523	hci_dev_unlock(hdev);
1524
1525	return rp->status;
1526}
1527
1528static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1529				   struct sk_buff *skb)
1530{
1531	struct hci_ev_status *rp = data;
1532	__u8 *sent;
1533
1534	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1535
1536	if (rp->status)
1537		return rp->status;
1538
1539	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1540	if (!sent)
1541		return rp->status;
1542
1543	hci_dev_lock(hdev);
1544
 1545	/* If we're doing connection initiation as peripheral, set a
 1546	 * timeout in case something goes wrong.
 1547	 */
1548	if (*sent) {
1549		struct hci_conn *conn;
1550
1551		hci_dev_set_flag(hdev, HCI_LE_ADV);
1552
1553		conn = hci_lookup_le_connect(hdev);
1554		if (conn)
1555			queue_delayed_work(hdev->workqueue,
1556					   &conn->le_conn_timeout,
1557					   conn->conn_timeout);
1558	} else {
1559		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1560	}
1561
1562	hci_dev_unlock(hdev);
1563
1564	return rp->status;
1565}
1566
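/* Extended advertising enable/disable: keep HCI_LE_ADV and the per-instance
 * adv_info state in sync. Disabling with num_of_sets == 0 means every
 * advertising set shall be considered disabled.
 */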
1567static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1568				       struct sk_buff *skb)
1569{
1570	struct hci_cp_le_set_ext_adv_enable *cp;
1571	struct hci_cp_ext_adv_set *set;
1572	struct adv_info *adv = NULL, *n;
1573	struct hci_ev_status *rp = data;
1574
1575	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1576
1577	if (rp->status)
1578		return rp->status;
1579
1580	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1581	if (!cp)
1582		return rp->status;
1583
1584	set = (void *)cp->data;
1585
1586	hci_dev_lock(hdev);
1587
1588	if (cp->num_of_sets)
1589		adv = hci_find_adv_instance(hdev, set->handle);
1590
1591	if (cp->enable) {
1592		struct hci_conn *conn;
1593
1594		hci_dev_set_flag(hdev, HCI_LE_ADV);
1595
1596		if (adv && !adv->periodic)
1597			adv->enabled = true;
1598
1599		conn = hci_lookup_le_connect(hdev);
1600		if (conn)
1601			queue_delayed_work(hdev->workqueue,
1602					   &conn->le_conn_timeout,
1603					   conn->conn_timeout);
1604	} else {
1605		if (cp->num_of_sets) {
1606			if (adv)
1607				adv->enabled = false;
1608
 1609			/* If just one instance was disabled, check if any other
 1610			 * instance is still enabled before clearing HCI_LE_ADV.
 1611			 */
1612			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1613						 list) {
1614				if (adv->enabled)
1615					goto unlock;
1616			}
1617		} else {
1618			/* All instances shall be considered disabled */
1619			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1620						 list)
1621				adv->enabled = false;
1622		}
1623
1624		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1625	}
1626
1627unlock:
1628	hci_dev_unlock(hdev);
1629	return rp->status;
1630}
1631
1632static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1633				   struct sk_buff *skb)
1634{
1635	struct hci_cp_le_set_scan_param *cp;
1636	struct hci_ev_status *rp = data;
1637
1638	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1639
1640	if (rp->status)
1641		return rp->status;
1642
1643	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1644	if (!cp)
1645		return rp->status;
1646
1647	hci_dev_lock(hdev);
1648
1649	hdev->le_scan_type = cp->type;
1650
1651	hci_dev_unlock(hdev);
1652
1653	return rp->status;
1654}
1655
1656static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1657				       struct sk_buff *skb)
1658{
1659	struct hci_cp_le_set_ext_scan_params *cp;
1660	struct hci_ev_status *rp = data;
1661	struct hci_cp_le_scan_phy_params *phy_param;
1662
1663	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1664
1665	if (rp->status)
1666		return rp->status;
1667
1668	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1669	if (!cp)
1670		return rp->status;
1671
1672	phy_param = (void *)cp->data;
1673
1674	hci_dev_lock(hdev);
1675
1676	hdev->le_scan_type = phy_param->type;
1677
1678	hci_dev_unlock(hdev);
1679
1680	return rp->status;
1681}
1682
1683static bool has_pending_adv_report(struct hci_dev *hdev)
1684{
1685	struct discovery_state *d = &hdev->discovery;
1686
1687	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1688}
1689
1690static void clear_pending_adv_report(struct hci_dev *hdev)
1691{
1692	struct discovery_state *d = &hdev->discovery;
1693
1694	bacpy(&d->last_adv_addr, BDADDR_ANY);
1695	d->last_adv_data_len = 0;
1696}
1697
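/* Remember the most recent advertising report. If scanning is disabled
 * while a report is still pending, le_set_scan_enable_complete() below
 * flushes it to the management interface via mgmt_device_found().
 */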
1698static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1699				     u8 bdaddr_type, s8 rssi, u32 flags,
1700				     u8 *data, u8 len)
1701{
1702	struct discovery_state *d = &hdev->discovery;
1703
1704	if (len > max_adv_len(hdev))
1705		return;
1706
1707	bacpy(&d->last_adv_addr, bdaddr);
1708	d->last_adv_addr_type = bdaddr_type;
1709	d->last_adv_rssi = rssi;
1710	d->last_adv_flags = flags;
1711	memcpy(d->last_adv_data, data, len);
1712	d->last_adv_data_len = len;
1713}
1714
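/* Common completion logic for the legacy and extended LE scan enable
 * commands: update HCI_LE_SCAN, flush any pending advertising report on
 * disable, and advance the discovery state machine accordingly.
 */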
1715static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1716{
1717	hci_dev_lock(hdev);
1718
1719	switch (enable) {
1720	case LE_SCAN_ENABLE:
1721		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1722		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
1723			clear_pending_adv_report(hdev);
1724			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1725		}
1726		break;
1727
1728	case LE_SCAN_DISABLE:
1729		/* We do this here instead of when setting DISCOVERY_STOPPED
1730		 * since the latter would potentially require waiting for
1731		 * inquiry to stop too.
1732		 */
1733		if (has_pending_adv_report(hdev)) {
1734			struct discovery_state *d = &hdev->discovery;
1735
1736			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1737					  d->last_adv_addr_type, NULL,
1738					  d->last_adv_rssi, d->last_adv_flags,
1739					  d->last_adv_data,
1740					  d->last_adv_data_len, NULL, 0, 0);
1741		}
1742
1743		/* Cancel this timer so that we don't try to disable scanning
1744		 * when it's already disabled.
1745		 */
1746		cancel_delayed_work(&hdev->le_scan_disable);
1747
1748		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1749
 1750		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
 1751		 * interrupted scanning due to a connect request. Therefore
 1752		 * mark discovery as stopped.
 1753		 */
1754		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1755			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1756		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1757			 hdev->discovery.state == DISCOVERY_FINDING)
1758			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1759
1760		break;
1761
1762	default:
1763		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1764			   enable);
1765		break;
1766	}
1767
1768	hci_dev_unlock(hdev);
1769}
1770
1771static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1772				    struct sk_buff *skb)
1773{
1774	struct hci_cp_le_set_scan_enable *cp;
1775	struct hci_ev_status *rp = data;
1776
1777	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1778
1779	if (rp->status)
1780		return rp->status;
1781
1782	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1783	if (!cp)
1784		return rp->status;
1785
1786	le_set_scan_enable_complete(hdev, cp->enable);
1787
1788	return rp->status;
1789}
1790
1791static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1792					struct sk_buff *skb)
1793{
1794	struct hci_cp_le_set_ext_scan_enable *cp;
1795	struct hci_ev_status *rp = data;
1796
1797	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1798
1799	if (rp->status)
1800		return rp->status;
1801
1802	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1803	if (!cp)
1804		return rp->status;
1805
1806	le_set_scan_enable_complete(hdev, cp->enable);
1807
1808	return rp->status;
1809}
1810
1811static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1812				      struct sk_buff *skb)
1813{
1814	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1815
1816	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1817		   rp->num_of_sets);
1818
1819	if (rp->status)
1820		return rp->status;
1821
1822	hdev->le_num_of_adv_sets = rp->num_of_sets;
1823
1824	return rp->status;
1825}
1826
1827static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1828					  struct sk_buff *skb)
1829{
1830	struct hci_rp_le_read_accept_list_size *rp = data;
1831
1832	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1833
1834	if (rp->status)
1835		return rp->status;
1836
1837	hdev->le_accept_list_size = rp->size;
1838
1839	return rp->status;
1840}
1841
1842static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1843				      struct sk_buff *skb)
1844{
1845	struct hci_ev_status *rp = data;
1846
1847	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1848
1849	if (rp->status)
1850		return rp->status;
1851
1852	hci_dev_lock(hdev);
1853	hci_bdaddr_list_clear(&hdev->le_accept_list);
1854	hci_dev_unlock(hdev);
1855
1856	return rp->status;
1857}
1858
1859static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1860				       struct sk_buff *skb)
1861{
1862	struct hci_cp_le_add_to_accept_list *sent;
1863	struct hci_ev_status *rp = data;
1864
1865	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1866
1867	if (rp->status)
1868		return rp->status;
1869
1870	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1871	if (!sent)
1872		return rp->status;
1873
1874	hci_dev_lock(hdev);
1875	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1876			    sent->bdaddr_type);
1877	hci_dev_unlock(hdev);
1878
1879	return rp->status;
1880}
1881
1882static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1883					 struct sk_buff *skb)
1884{
1885	struct hci_cp_le_del_from_accept_list *sent;
1886	struct hci_ev_status *rp = data;
1887
1888	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1889
1890	if (rp->status)
1891		return rp->status;
1892
1893	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1894	if (!sent)
1895		return rp->status;
1896
1897	hci_dev_lock(hdev);
1898	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1899			    sent->bdaddr_type);
1900	hci_dev_unlock(hdev);
1901
1902	return rp->status;
1903}
1904
1905static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1906					  struct sk_buff *skb)
1907{
1908	struct hci_rp_le_read_supported_states *rp = data;
1909
1910	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1911
1912	if (rp->status)
1913		return rp->status;
1914
1915	memcpy(hdev->le_states, rp->le_states, 8);
1916
1917	return rp->status;
1918}
1919
1920static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1921				      struct sk_buff *skb)
1922{
1923	struct hci_rp_le_read_def_data_len *rp = data;
1924
1925	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1926
1927	if (rp->status)
1928		return rp->status;
1929
1930	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1931	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1932
1933	return rp->status;
1934}
1935
1936static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1937				       struct sk_buff *skb)
1938{
1939	struct hci_cp_le_write_def_data_len *sent;
1940	struct hci_ev_status *rp = data;
1941
1942	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1943
1944	if (rp->status)
1945		return rp->status;
1946
1947	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1948	if (!sent)
1949		return rp->status;
1950
1951	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1952	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1953
1954	return rp->status;
1955}
1956
1957static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1958				       struct sk_buff *skb)
1959{
1960	struct hci_cp_le_add_to_resolv_list *sent;
1961	struct hci_ev_status *rp = data;
1962
1963	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1964
1965	if (rp->status)
1966		return rp->status;
1967
1968	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1969	if (!sent)
1970		return rp->status;
1971
1972	hci_dev_lock(hdev);
1973	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1974				sent->bdaddr_type, sent->peer_irk,
1975				sent->local_irk);
1976	hci_dev_unlock(hdev);
1977
1978	return rp->status;
1979}
1980
1981static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
1982					 struct sk_buff *skb)
1983{
1984	struct hci_cp_le_del_from_resolv_list *sent;
1985	struct hci_ev_status *rp = data;
1986
1987	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1988
1989	if (rp->status)
1990		return rp->status;
1991
1992	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1993	if (!sent)
1994		return rp->status;
1995
1996	hci_dev_lock(hdev);
1997	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1998			    sent->bdaddr_type);
1999	hci_dev_unlock(hdev);
2000
2001	return rp->status;
2002}
2003
2004static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2005				      struct sk_buff *skb)
2006{
2007	struct hci_ev_status *rp = data;
2008
2009	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2010
2011	if (rp->status)
2012		return rp->status;
2013
2014	hci_dev_lock(hdev);
2015	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2016	hci_dev_unlock(hdev);
2017
2018	return rp->status;
2019}
2020
2021static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2022					  struct sk_buff *skb)
2023{
2024	struct hci_rp_le_read_resolv_list_size *rp = data;
2025
2026	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2027
2028	if (rp->status)
2029		return rp->status;
2030
2031	hdev->le_resolv_list_size = rp->size;
2032
2033	return rp->status;
2034}
2035
2036static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2037					       struct sk_buff *skb)
2038{
2039	struct hci_ev_status *rp = data;
2040	__u8 *sent;
2041
2042	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2043
2044	if (rp->status)
2045		return rp->status;
2046
2047	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2048	if (!sent)
2049		return rp->status;
2050
2051	hci_dev_lock(hdev);
2052
2053	if (*sent)
2054		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2055	else
2056		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2057
2058	hci_dev_unlock(hdev);
2059
2060	return rp->status;
2061}
2062
2063static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2064				      struct sk_buff *skb)
2065{
2066	struct hci_rp_le_read_max_data_len *rp = data;
2067
2068	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2069
2070	if (rp->status)
2071		return rp->status;
2072
2073	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2074	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2075	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2076	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2077
2078	return rp->status;
2079}
2080
2081static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2082					 struct sk_buff *skb)
2083{
2084	struct hci_cp_write_le_host_supported *sent;
2085	struct hci_ev_status *rp = data;
2086
2087	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2088
2089	if (rp->status)
2090		return rp->status;
2091
2092	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2093	if (!sent)
2094		return rp->status;
2095
2096	hci_dev_lock(hdev);
2097
2098	if (sent->le) {
2099		hdev->features[1][0] |= LMP_HOST_LE;
2100		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2101	} else {
2102		hdev->features[1][0] &= ~LMP_HOST_LE;
2103		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2104		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2105	}
2106
2107	if (sent->simul)
2108		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2109	else
2110		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2111
2112	hci_dev_unlock(hdev);
2113
2114	return rp->status;
2115}
2116
2117static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2118			       struct sk_buff *skb)
2119{
2120	struct hci_cp_le_set_adv_param *cp;
2121	struct hci_ev_status *rp = data;
2122
2123	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2124
2125	if (rp->status)
2126		return rp->status;
2127
2128	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2129	if (!cp)
2130		return rp->status;
2131
2132	hci_dev_lock(hdev);
2133	hdev->adv_addr_type = cp->own_address_type;
2134	hci_dev_unlock(hdev);
2135
2136	return rp->status;
2137}
2138
2139static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2140				   struct sk_buff *skb)
2141{
2142	struct hci_rp_le_set_ext_adv_params *rp = data;
2143	struct hci_cp_le_set_ext_adv_params *cp;
2144	struct adv_info *adv_instance;
2145
2146	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2147
2148	if (rp->status)
2149		return rp->status;
2150
2151	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2152	if (!cp)
2153		return rp->status;
2154
2155	hci_dev_lock(hdev);
2156	hdev->adv_addr_type = cp->own_addr_type;
2157	if (!cp->handle) {
2158		/* Store in hdev for instance 0 */
2159		hdev->adv_tx_power = rp->tx_power;
2160	} else {
2161		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2162		if (adv_instance)
2163			adv_instance->tx_power = rp->tx_power;
2164	}
 2165	/* Update adv data as the TX power is now known */
2166	hci_update_adv_data(hdev, cp->handle);
2167
2168	hci_dev_unlock(hdev);
2169
2170	return rp->status;
2171}
2172
2173static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2174			   struct sk_buff *skb)
2175{
2176	struct hci_rp_read_rssi *rp = data;
2177	struct hci_conn *conn;
2178
2179	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2180
2181	if (rp->status)
2182		return rp->status;
2183
2184	hci_dev_lock(hdev);
2185
2186	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2187	if (conn)
2188		conn->rssi = rp->rssi;
2189
2190	hci_dev_unlock(hdev);
2191
2192	return rp->status;
2193}
2194
2195static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2196			       struct sk_buff *skb)
2197{
2198	struct hci_cp_read_tx_power *sent;
2199	struct hci_rp_read_tx_power *rp = data;
2200	struct hci_conn *conn;
2201
2202	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2203
2204	if (rp->status)
2205		return rp->status;
2206
2207	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2208	if (!sent)
2209		return rp->status;
2210
2211	hci_dev_lock(hdev);
2212
2213	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2214	if (!conn)
2215		goto unlock;
2216
2217	switch (sent->type) {
2218	case 0x00:
2219		conn->tx_power = rp->tx_power;
2220		break;
2221	case 0x01:
2222		conn->max_tx_power = rp->tx_power;
2223		break;
2224	}
2225
2226unlock:
2227	hci_dev_unlock(hdev);
2228	return rp->status;
2229}
2230
2231static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2232				      struct sk_buff *skb)
2233{
2234	struct hci_ev_status *rp = data;
2235	u8 *mode;
2236
2237	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2238
2239	if (rp->status)
2240		return rp->status;
2241
2242	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2243	if (mode)
2244		hdev->ssp_debug_mode = *mode;
2245
2246	return rp->status;
2247}
2248
2249static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2250{
2251	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2252
2253	if (status)
2254		return;
2255
2256	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2257		set_bit(HCI_INQUIRY, &hdev->flags);
2258}
2259
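/* Command status handler for HCI_OP_CREATE_CONN. On failure the pending
 * connection is closed and deleted; on success an unset ACL connection
 * object is created if one does not exist yet.
 */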
2260static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2261{
2262	struct hci_cp_create_conn *cp;
2263	struct hci_conn *conn;
2264
2265	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2266
2267	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2268	if (!cp)
2269		return;
2270
2271	hci_dev_lock(hdev);
2272
2273	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2274
2275	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2276
2277	if (status) {
2278		if (conn && conn->state == BT_CONNECT) {
2279			conn->state = BT_CLOSED;
2280			hci_connect_cfm(conn, status);
2281			hci_conn_del(conn);
2282		}
2283	} else {
2284		if (!conn) {
2285			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2286						  HCI_ROLE_MASTER);
2287			if (IS_ERR(conn))
2288				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2289		}
2290	}
2291
2292	hci_dev_unlock(hdev);
2293}
2294
2295static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2296{
2297	struct hci_cp_add_sco *cp;
2298	struct hci_conn *acl;
2299	struct hci_link *link;
2300	__u16 handle;
2301
2302	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2303
2304	if (!status)
2305		return;
2306
2307	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2308	if (!cp)
2309		return;
2310
2311	handle = __le16_to_cpu(cp->handle);
2312
2313	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2314
2315	hci_dev_lock(hdev);
2316
2317	acl = hci_conn_hash_lookup_handle(hdev, handle);
2318	if (acl) {
2319		link = list_first_entry_or_null(&acl->link_list,
2320						struct hci_link, list);
2321		if (link && link->conn) {
2322			link->conn->state = BT_CLOSED;
2323
2324			hci_connect_cfm(link->conn, status);
2325			hci_conn_del(link->conn);
2326		}
2327	}
2328
2329	hci_dev_unlock(hdev);
2330}
2331
2332static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2333{
2334	struct hci_cp_auth_requested *cp;
2335	struct hci_conn *conn;
2336
2337	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2338
2339	if (!status)
2340		return;
2341
2342	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2343	if (!cp)
2344		return;
2345
2346	hci_dev_lock(hdev);
2347
2348	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2349	if (conn) {
2350		if (conn->state == BT_CONFIG) {
2351			hci_connect_cfm(conn, status);
2352			hci_conn_drop(conn);
2353		}
2354	}
2355
2356	hci_dev_unlock(hdev);
2357}
2358
2359static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2360{
2361	struct hci_cp_set_conn_encrypt *cp;
2362	struct hci_conn *conn;
2363
2364	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2365
2366	if (!status)
2367		return;
2368
2369	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2370	if (!cp)
2371		return;
2372
2373	hci_dev_lock(hdev);
2374
2375	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2376	if (conn) {
2377		if (conn->state == BT_CONFIG) {
2378			hci_connect_cfm(conn, status);
2379			hci_conn_drop(conn);
2380		}
2381	}
2382
2383	hci_dev_unlock(hdev);
2384}
2385
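/* Return nonzero when an outgoing connection in BT_CONFIG still requires
 * authentication, i.e. for SSP links, MITM-protected requests, or pending
 * security levels of MEDIUM and above (never for SDP-only security).
 */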
2386static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2387				    struct hci_conn *conn)
2388{
2389	if (conn->state != BT_CONFIG || !conn->out)
2390		return 0;
2391
2392	if (conn->pending_sec_level == BT_SECURITY_SDP)
2393		return 0;
2394
2395	/* Only request authentication for SSP connections or non-SSP
2396	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2397	 * is requested.
2398	 */
2399	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2400	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2401	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2402	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2403		return 0;
2404
2405	return 1;
2406}
2407
2408static int hci_resolve_name(struct hci_dev *hdev,
2409				   struct inquiry_entry *e)
2410{
2411	struct hci_cp_remote_name_req cp;
2412
2413	memset(&cp, 0, sizeof(cp));
2414
2415	bacpy(&cp.bdaddr, &e->data.bdaddr);
2416	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2417	cp.pscan_mode = e->data.pscan_mode;
2418	cp.clock_offset = e->data.clock_offset;
2419
2420	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2421}
2422
2423static bool hci_resolve_next_name(struct hci_dev *hdev)
2424{
2425	struct discovery_state *discov = &hdev->discovery;
2426	struct inquiry_entry *e;
2427
2428	if (list_empty(&discov->resolve))
2429		return false;
2430
2431	/* We should stop if we already spent too much time resolving names. */
2432	if (time_after(jiffies, discov->name_resolve_timeout)) {
2433		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2434		return false;
2435	}
2436
2437	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2438	if (!e)
2439		return false;
2440
2441	if (hci_resolve_name(hdev, e) == 0) {
2442		e->name_state = NAME_PENDING;
2443		return true;
2444	}
2445
2446	return false;
2447}
2448
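/* Handle completion (or failure) of a remote name request: report the
 * result to mgmt, mark the inquiry cache entry resolved, and kick off
 * resolution of the next pending name unless discovery is stopping.
 */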
2449static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2450				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2451{
2452	struct discovery_state *discov = &hdev->discovery;
2453	struct inquiry_entry *e;
2454
 2455	/* Update the mgmt connected state if necessary. Be careful,
 2456	 * however, with conn objects that exist but are not (yet)
 2457	 * connected: only those in the BT_CONFIG or BT_CONNECTED
 2458	 * states can be considered connected.
 2459	 */
2460	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2461		mgmt_device_connected(hdev, conn, name, name_len);
2462
2463	if (discov->state == DISCOVERY_STOPPED)
2464		return;
2465
2466	if (discov->state == DISCOVERY_STOPPING)
2467		goto discov_complete;
2468
2469	if (discov->state != DISCOVERY_RESOLVING)
2470		return;
2471
2472	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
 2473	/* If the device was not found in the list of found devices whose names
 2474	 * are pending, there is no need to continue resolving the next name, as
 2475	 * it will be done upon receiving another Remote Name Request Complete
 2476	 * Event. */
2477	if (!e)
2478		return;
2479
2480	list_del(&e->list);
2481
2482	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2483	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2484			 name, name_len);
2485
2486	if (hci_resolve_next_name(hdev))
2487		return;
2488
2489discov_complete:
2490	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2491}
2492
2493static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2494{
2495	struct hci_cp_remote_name_req *cp;
2496	struct hci_conn *conn;
2497
2498	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2499
 2500	/* If successful, wait for the remote name request complete event
 2501	 * before checking whether authentication is needed. */
2502	if (!status)
2503		return;
2504
2505	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2506	if (!cp)
2507		return;
2508
2509	hci_dev_lock(hdev);
2510
2511	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2512
2513	if (hci_dev_test_flag(hdev, HCI_MGMT))
2514		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2515
2516	if (!conn)
2517		goto unlock;
2518
2519	if (!hci_outgoing_auth_needed(hdev, conn))
2520		goto unlock;
2521
2522	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2523		struct hci_cp_auth_requested auth_cp;
2524
2525		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2526
2527		auth_cp.handle = __cpu_to_le16(conn->handle);
2528		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2529			     sizeof(auth_cp), &auth_cp);
2530	}
2531
2532unlock:
2533	hci_dev_unlock(hdev);
2534}
2535
2536static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2537{
2538	struct hci_cp_read_remote_features *cp;
2539	struct hci_conn *conn;
2540
2541	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2542
2543	if (!status)
2544		return;
2545
2546	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2547	if (!cp)
2548		return;
2549
2550	hci_dev_lock(hdev);
2551
2552	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2553	if (conn) {
2554		if (conn->state == BT_CONFIG) {
2555			hci_connect_cfm(conn, status);
2556			hci_conn_drop(conn);
2557		}
2558	}
2559
2560	hci_dev_unlock(hdev);
2561}
2562
2563static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2564{
2565	struct hci_cp_read_remote_ext_features *cp;
2566	struct hci_conn *conn;
2567
2568	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2569
2570	if (!status)
2571		return;
2572
2573	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2574	if (!cp)
2575		return;
2576
2577	hci_dev_lock(hdev);
2578
2579	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2580	if (conn) {
2581		if (conn->state == BT_CONFIG) {
2582			hci_connect_cfm(conn, status);
2583			hci_conn_drop(conn);
2584		}
2585	}
2586
2587	hci_dev_unlock(hdev);
2588}
2589
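/* Shared failure path for the (enhanced) setup synchronous connection
 * commands: look up the SCO/eSCO link hanging off the ACL handle, mark it
 * closed and tear it down.
 */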
2590static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2591				       __u8 status)
2592{
2593	struct hci_conn *acl;
2594	struct hci_link *link;
2595
2596	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2597
2598	hci_dev_lock(hdev);
2599
2600	acl = hci_conn_hash_lookup_handle(hdev, handle);
2601	if (acl) {
2602		link = list_first_entry_or_null(&acl->link_list,
2603						struct hci_link, list);
2604		if (link && link->conn) {
2605			link->conn->state = BT_CLOSED;
2606
2607			hci_connect_cfm(link->conn, status);
2608			hci_conn_del(link->conn);
2609		}
2610	}
2611
2612	hci_dev_unlock(hdev);
2613}
2614
2615static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2616{
2617	struct hci_cp_setup_sync_conn *cp;
2618
2619	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2620
2621	if (!status)
2622		return;
2623
2624	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2625	if (!cp)
2626		return;
2627
2628	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2629}
2630
2631static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2632{
2633	struct hci_cp_enhanced_setup_sync_conn *cp;
2634
2635	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2636
2637	if (!status)
2638		return;
2639
2640	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2641	if (!cp)
2642		return;
2643
2644	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2645}
2646
2647static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2648{
2649	struct hci_cp_sniff_mode *cp;
2650	struct hci_conn *conn;
2651
2652	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2653
2654	if (!status)
2655		return;
2656
2657	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2658	if (!cp)
2659		return;
2660
2661	hci_dev_lock(hdev);
2662
2663	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2664	if (conn) {
2665		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2666
2667		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2668			hci_sco_setup(conn, status);
2669	}
2670
2671	hci_dev_unlock(hdev);
2672}
2673
2674static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2675{
2676	struct hci_cp_exit_sniff_mode *cp;
2677	struct hci_conn *conn;
2678
2679	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2680
2681	if (!status)
2682		return;
2683
2684	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2685	if (!cp)
2686		return;
2687
2688	hci_dev_lock(hdev);
2689
2690	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2691	if (conn) {
2692		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2693
2694		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2695			hci_sco_setup(conn, status);
2696	}
2697
2698	hci_dev_unlock(hdev);
2699}
2700
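/* Command status handler for HCI_Disconnect. Cleanup is normally
 * deferred to HCI_EV_DISCONN_COMPLETE; this path only acts when the
 * command failed or the controller is suspended and no such event is
 * expected.
 */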
2701static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2702{
2703	struct hci_cp_disconnect *cp;
2704	struct hci_conn_params *params;
2705	struct hci_conn *conn;
2706	bool mgmt_conn;
2707
2708	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2709
2710	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and we are not
2711	 * suspended; otherwise clean up the connection immediately.
2712	 */
2713	if (!status && !hdev->suspended)
2714		return;
2715
2716	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2717	if (!cp)
2718		return;
2719
2720	hci_dev_lock(hdev);
2721
2722	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2723	if (!conn)
2724		goto unlock;
2725
2726	if (status) {
2727		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2728				       conn->dst_type, status);
2729
2730		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2731			hdev->cur_adv_instance = conn->adv_instance;
2732			hci_enable_advertising(hdev);
2733		}
2734
2735		/* Inform sockets conn is gone before we delete it */
2736		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2737
2738		goto done;
2739	}
2740
2741	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2742
2743	if (conn->type == ACL_LINK) {
2744		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2745			hci_remove_link_key(hdev, &conn->dst);
2746	}
2747
2748	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2749	if (params) {
2750		switch (params->auto_connect) {
2751		case HCI_AUTO_CONN_LINK_LOSS:
2752			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2753				break;
2754			fallthrough;
2755
2756		case HCI_AUTO_CONN_DIRECT:
2757		case HCI_AUTO_CONN_ALWAYS:
2758			hci_pend_le_list_del_init(params);
2759			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2760			break;
2761
2762		default:
2763			break;
2764		}
2765	}
2766
2767	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2768				 cp->reason, mgmt_conn);
2769
2770	hci_disconn_cfm(conn, cp->reason);
2771
2772done:
2773	/* If the disconnection failed for any reason, the upper layer
2774	 * does not retry the disconnection in the current implementation.
2775	 * Hence, we need to do some basic cleanup here and re-enable
2776	 * advertising if necessary.
2777	 */
2778	hci_conn_del(conn);
2779unlock:
2780	hci_dev_unlock(hdev);
2781}
2782
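/* Map the resolved LE address types (0x02/0x03), reported when the
 * controller performs address resolution, back to their base public or
 * random type, flagging via *resolved whether resolution took place.
 */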
2783static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2784{
2785	/* When using controller-based address resolution, the new address
2786	 * types 0x02 and 0x03 are used. These types need to be converted
2787	 * back into either public or random address type
2788	 */
2789	switch (type) {
2790	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2791		if (resolved)
2792			*resolved = true;
2793		return ADDR_LE_DEV_PUBLIC;
2794	case ADDR_LE_DEV_RANDOM_RESOLVED:
2795		if (resolved)
2796			*resolved = true;
2797		return ADDR_LE_DEV_RANDOM;
2798	}
2799
2800	if (resolved)
2801		*resolved = false;
2802	return type;
2803}
2804
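/* Common helper for the LE (Extended) Create Connection command status:
 * normalize the own-address type and record the initiator/responder
 * address pair on the pending connection for later use by SMP.
 */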
2805static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2806			      u8 peer_addr_type, u8 own_address_type,
2807			      u8 filter_policy)
2808{
2809	struct hci_conn *conn;
2810
2811	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2812				       peer_addr_type);
2813	if (!conn)
2814		return;
2815
2816	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2817
2818	/* Store the initiator and responder address information which
2819	 * is needed for SMP. These values will not change during the
2820	 * lifetime of the connection.
2821	 */
2822	conn->init_addr_type = own_address_type;
2823	if (own_address_type == ADDR_LE_DEV_RANDOM)
2824		bacpy(&conn->init_addr, &hdev->random_addr);
2825	else
2826		bacpy(&conn->init_addr, &hdev->bdaddr);
2827
2828	conn->resp_addr_type = peer_addr_type;
2829	bacpy(&conn->resp_addr, peer_addr);
2830}
2831
2832static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2833{
2834	struct hci_cp_le_create_conn *cp;
2835
2836	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2837
2838	/* All connection failure handling is taken care of by the
2839	 * hci_conn_failed function which is triggered by the HCI
2840	 * request completion callbacks used for connecting.
2841	 */
2842	if (status)
2843		return;
2844
2845	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2846	if (!cp)
2847		return;
2848
2849	hci_dev_lock(hdev);
2850
2851	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2852			  cp->own_address_type, cp->filter_policy);
2853
2854	hci_dev_unlock(hdev);
2855}
2856
2857static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2858{
2859	struct hci_cp_le_ext_create_conn *cp;
2860
2861	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2862
2863	/* All connection failure handling is taken care of by the
2864	 * hci_conn_failed function which is triggered by the HCI
2865	 * request completion callbacks used for connecting.
2866	 */
2867	if (status)
2868		return;
2869
2870	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2871	if (!cp)
2872		return;
2873
2874	hci_dev_lock(hdev);
2875
2876	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2877			  cp->own_addr_type, cp->filter_policy);
2878
2879	hci_dev_unlock(hdev);
2880}
2881
2882static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2883{
2884	struct hci_cp_le_read_remote_features *cp;
2885	struct hci_conn *conn;
2886
2887	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2888
2889	if (!status)
2890		return;
2891
2892	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2893	if (!cp)
2894		return;
2895
2896	hci_dev_lock(hdev);
2897
2898	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2899	if (conn) {
2900		if (conn->state == BT_CONFIG) {
2901			hci_connect_cfm(conn, status);
2902			hci_conn_drop(conn);
2903		}
2904	}
2905
2906	hci_dev_unlock(hdev);
2907}
2908
2909static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2910{
2911	struct hci_cp_le_start_enc *cp;
2912	struct hci_conn *conn;
2913
2914	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2915
2916	if (!status)
2917		return;
2918
2919	hci_dev_lock(hdev);
2920
2921	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2922	if (!cp)
2923		goto unlock;
2924
2925	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2926	if (!conn)
2927		goto unlock;
2928
2929	if (conn->state != BT_CONNECTED)
2930		goto unlock;
2931
2932	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2933	hci_conn_drop(conn);
2934
2935unlock:
2936	hci_dev_unlock(hdev);
2937}
2938
2939static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2940{
2941	struct hci_cp_switch_role *cp;
2942	struct hci_conn *conn;
2943
2944	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2945
2946	if (!status)
2947		return;
2948
2949	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2950	if (!cp)
2951		return;
2952
2953	hci_dev_lock(hdev);
2954
2955	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2956	if (conn)
2957		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2958
2959	hci_dev_unlock(hdev);
2960}
2961
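/* HCI_EV_INQUIRY_COMPLETE: clear the HCI_INQUIRY flag, wake any waiters,
 * and either start resolving names from the inquiry cache or mark
 * discovery as stopped, taking a still-running simultaneous LE scan into
 * account.
 */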
2962static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2963				     struct sk_buff *skb)
2964{
2965	struct hci_ev_status *ev = data;
2966	struct discovery_state *discov = &hdev->discovery;
2967	struct inquiry_entry *e;
2968
2969	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2970
2971	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2972		return;
2973
2974	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2975	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2976
2977	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2978		return;
2979
2980	hci_dev_lock(hdev);
2981
2982	if (discov->state != DISCOVERY_FINDING)
2983		goto unlock;
2984
2985	if (list_empty(&discov->resolve)) {
2986		/* When BR/EDR inquiry is active and no LE scanning is in
2987		 * progress, then change discovery state to indicate completion.
2988		 *
2989		 * When running LE scanning and BR/EDR inquiry simultaneously
2990		 * and the LE scan already finished, then change the discovery
2991		 * state to indicate completion.
2992		 */
2993		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2994		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2995			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2996		goto unlock;
2997	}
2998
2999	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3000	if (e && hci_resolve_name(hdev, e) == 0) {
3001		e->name_state = NAME_PENDING;
3002		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3003		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3004	} else {
3005		/* When BR/EDR inquiry is active and no LE scanning is in
3006		 * progress, then change discovery state to indicate completion.
3007		 *
3008		 * When running LE scanning and BR/EDR inquiry simultaneously
3009		 * and the LE scan already finished, then change the discovery
3010		 * state to indicate completion.
3011		 */
3012		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3013		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3014			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3015	}
3016
3017unlock:
3018	hci_dev_unlock(hdev);
3019}
3020
3021static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3022				   struct sk_buff *skb)
3023{
3024	struct hci_ev_inquiry_result *ev = edata;
3025	struct inquiry_data data;
3026	int i;
3027
3028	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3029			     flex_array_size(ev, info, ev->num)))
3030		return;
3031
3032	bt_dev_dbg(hdev, "num %d", ev->num);
3033
3034	if (!ev->num)
3035		return;
3036
3037	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3038		return;
3039
3040	hci_dev_lock(hdev);
3041
3042	for (i = 0; i < ev->num; i++) {
3043		struct inquiry_info *info = &ev->info[i];
3044		u32 flags;
3045
3046		bacpy(&data.bdaddr, &info->bdaddr);
3047		data.pscan_rep_mode	= info->pscan_rep_mode;
3048		data.pscan_period_mode	= info->pscan_period_mode;
3049		data.pscan_mode		= info->pscan_mode;
3050		memcpy(data.dev_class, info->dev_class, 3);
3051		data.clock_offset	= info->clock_offset;
3052		data.rssi		= HCI_RSSI_INVALID;
3053		data.ssp_mode		= 0x00;
3054
3055		flags = hci_inquiry_cache_update(hdev, &data, false);
3056
3057		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3058				  info->dev_class, HCI_RSSI_INVALID,
3059				  flags, NULL, 0, NULL, 0, 0);
3060	}
3061
3062	hci_dev_unlock(hdev);
3063}
3064
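/* HCI_EV_CONN_COMPLETE: finalize an outgoing or incoming BR/EDR
 * connection. Note the guard below against processing the event twice
 * for the same connection, which could otherwise corrupt kernel memory.
 */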
3065static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3066				  struct sk_buff *skb)
3067{
3068	struct hci_ev_conn_complete *ev = data;
3069	struct hci_conn *conn;
3070	u8 status = ev->status;
3071
3072	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3073
3074	hci_dev_lock(hdev);
3075
3076	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3077	if (!conn) {
3078		/* In case of an error status with no pending connection,
3079		 * just unlock as there is nothing to clean up.
3080		 */
3081		if (ev->status)
3082			goto unlock;
3083
3084		/* The connection may not exist if it was auto-connected. Check
3085		 * the BR/EDR allowlist to see if this device is allowed to
3086		 * auto-connect. If the link is an ACL type, create the
3087		 * connection automatically.
3088		 *
3089		 * Auto-connect will only occur if the event filter is
3090		 * programmed with a given address. Right now, the event filter
3091		 * is only used during suspend.
3092		 */
3093		if (ev->link_type == ACL_LINK &&
3094		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3095						      &ev->bdaddr,
3096						      BDADDR_BREDR)) {
3097			conn = hci_conn_add_unset(hdev, ev->link_type,
3098						  &ev->bdaddr, HCI_ROLE_SLAVE);
3099			if (IS_ERR(conn)) {
3100				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3101				goto unlock;
3102			}
3103		} else {
3104			if (ev->link_type != SCO_LINK)
3105				goto unlock;
3106
3107			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3108						       &ev->bdaddr);
3109			if (!conn)
3110				goto unlock;
3111
3112			conn->type = SCO_LINK;
3113		}
3114	}
3115
3116	/* The HCI_Connection_Complete event is only sent once per connection.
3117	 * Processing it more than once per connection can corrupt kernel memory.
3118	 *
3119	 * As the connection handle is set here for the first time, it indicates
3120	 * whether the connection is already set up.
3121	 */
3122	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3123		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3124		goto unlock;
3125	}
3126
3127	if (!status) {
3128		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3129		if (status)
3130			goto done;
3131
3132		if (conn->type == ACL_LINK) {
3133			conn->state = BT_CONFIG;
3134			hci_conn_hold(conn);
3135
3136			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3137			    !hci_find_link_key(hdev, &ev->bdaddr))
3138				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3139			else
3140				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3141		} else
3142			conn->state = BT_CONNECTED;
3143
3144		hci_debugfs_create_conn(conn);
3145		hci_conn_add_sysfs(conn);
3146
3147		if (test_bit(HCI_AUTH, &hdev->flags))
3148			set_bit(HCI_CONN_AUTH, &conn->flags);
3149
3150		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3151			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3152
3153		/* Handle "Link key request" completing ahead of the "connect request" */
3154		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3155		    ev->link_type == ACL_LINK) {
3156			struct link_key *key;
3157			struct hci_cp_read_enc_key_size cp;
3158
3159			key = hci_find_link_key(hdev, &ev->bdaddr);
3160			if (key) {
3161				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3162
3163				if (!read_key_size_capable(hdev)) {
3164					conn->enc_key_size = HCI_LINK_KEY_SIZE;
3165				} else {
3166					cp.handle = cpu_to_le16(conn->handle);
3167					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3168							 sizeof(cp), &cp)) {
3169						bt_dev_err(hdev, "sending read key size failed");
3170						conn->enc_key_size = HCI_LINK_KEY_SIZE;
3171					}
3172				}
3173
3174				hci_encrypt_cfm(conn, ev->status);
3175			}
3176		}
3177
3178		/* Get remote features */
3179		if (conn->type == ACL_LINK) {
3180			struct hci_cp_read_remote_features cp;
3181			cp.handle = ev->handle;
3182			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3183				     sizeof(cp), &cp);
3184
3185			hci_update_scan(hdev);
3186		}
3187
3188		/* Set packet type for incoming connection */
3189		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3190			struct hci_cp_change_conn_ptype cp;
3191			cp.handle = ev->handle;
3192			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3193			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3194				     &cp);
3195		}
3196	}
3197
3198	if (conn->type == ACL_LINK)
3199		hci_sco_setup(conn, ev->status);
3200
3201done:
3202	if (status) {
3203		hci_conn_failed(conn, status);
3204	} else if (ev->link_type == SCO_LINK) {
3205		switch (conn->setting & SCO_AIRMODE_MASK) {
3206		case SCO_AIRMODE_CVSD:
3207			if (hdev->notify)
3208				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3209			break;
3210		}
3211
3212		hci_connect_cfm(conn, status);
3213	}
3214
3215unlock:
3216	hci_dev_unlock(hdev);
3217}
3218
3219static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3220{
3221	struct hci_cp_reject_conn_req cp;
3222
3223	bacpy(&cp.bdaddr, bdaddr);
3224	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3225	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3226}
3227
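/* HCI_EV_CONN_REQUEST: decide whether to accept, reject or defer an
 * incoming connection, honouring the reject list, the accept list and
 * the HCI_CONNECTABLE setting when mgmt is in control.
 */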
3228static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3229				 struct sk_buff *skb)
3230{
3231	struct hci_ev_conn_request *ev = data;
3232	int mask = hdev->link_mode;
3233	struct inquiry_entry *ie;
3234	struct hci_conn *conn;
3235	__u8 flags = 0;
3236
3237	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3238
3239	/* Reject an incoming connection from a device with the same BD_ADDR,
3240	 * guarding against CVE-2020-26555
3241	 */
3242	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3243		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3244			   &ev->bdaddr);
3245		hci_reject_conn(hdev, &ev->bdaddr);
3246		return;
3247	}
3248
3249	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3250				      &flags);
3251
3252	if (!(mask & HCI_LM_ACCEPT)) {
3253		hci_reject_conn(hdev, &ev->bdaddr);
3254		return;
3255	}
3256
3257	hci_dev_lock(hdev);
3258
3259	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3260				   BDADDR_BREDR)) {
3261		hci_reject_conn(hdev, &ev->bdaddr);
3262		goto unlock;
3263	}
3264
3265	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3266	 * connection. These features are only touched through mgmt so
3267	 * only do the checks if HCI_MGMT is set.
3268	 */
3269	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3270	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3271	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3272					       BDADDR_BREDR)) {
3273		hci_reject_conn(hdev, &ev->bdaddr);
3274		goto unlock;
3275	}
3276
3277	/* Connection accepted */
3278
3279	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3280	if (ie)
3281		memcpy(ie->data.dev_class, ev->dev_class, 3);
3282
3283	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3284			&ev->bdaddr);
3285	if (!conn) {
3286		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3287					  HCI_ROLE_SLAVE);
3288		if (IS_ERR(conn)) {
3289			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3290			goto unlock;
3291		}
3292	}
3293
3294	memcpy(conn->dev_class, ev->dev_class, 3);
3295
3296	hci_dev_unlock(hdev);
3297
3298	if (ev->link_type == ACL_LINK ||
3299	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3300		struct hci_cp_accept_conn_req cp;
3301		conn->state = BT_CONNECT;
3302
3303		bacpy(&cp.bdaddr, &ev->bdaddr);
3304
3305		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3306			cp.role = 0x00; /* Become central */
3307		else
3308			cp.role = 0x01; /* Remain peripheral */
3309
3310		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3311	} else if (!(flags & HCI_PROTO_DEFER)) {
3312		struct hci_cp_accept_sync_conn_req cp;
3313		conn->state = BT_CONNECT;
3314
3315		bacpy(&cp.bdaddr, &ev->bdaddr);
3316		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3317
3318		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3319		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3320		cp.max_latency    = cpu_to_le16(0xffff);
3321		cp.content_format = cpu_to_le16(hdev->voice_setting);
3322		cp.retrans_effort = 0xff;
3323
3324		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3325			     &cp);
3326	} else {
3327		conn->state = BT_CONNECT2;
3328		hci_connect_cfm(conn, 0);
3329	}
3330
3331	return;
3332unlock:
3333	hci_dev_unlock(hdev);
3334}
3335
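/* Translate an HCI disconnect reason code into the coarser set of
 * MGMT_DEV_DISCONN_* reasons exposed to userspace over mgmt.
 */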
3336static u8 hci_to_mgmt_reason(u8 err)
3337{
3338	switch (err) {
3339	case HCI_ERROR_CONNECTION_TIMEOUT:
3340		return MGMT_DEV_DISCONN_TIMEOUT;
3341	case HCI_ERROR_REMOTE_USER_TERM:
3342	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3343	case HCI_ERROR_REMOTE_POWER_OFF:
3344		return MGMT_DEV_DISCONN_REMOTE;
3345	case HCI_ERROR_LOCAL_HOST_TERM:
3346		return MGMT_DEV_DISCONN_LOCAL_HOST;
3347	default:
3348		return MGMT_DEV_DISCONN_UNKNOWN;
3349	}
3350}
3351
3352static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3353				     struct sk_buff *skb)
3354{
3355	struct hci_ev_disconn_complete *ev = data;
3356	u8 reason;
3357	struct hci_conn_params *params;
3358	struct hci_conn *conn;
3359	bool mgmt_connected;
3360
3361	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3362
3363	hci_dev_lock(hdev);
3364
3365	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3366	if (!conn)
3367		goto unlock;
3368
3369	if (ev->status) {
3370		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3371				       conn->dst_type, ev->status);
3372		goto unlock;
3373	}
3374
3375	conn->state = BT_CLOSED;
3376
3377	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3378
3379	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3380		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3381	else
3382		reason = hci_to_mgmt_reason(ev->reason);
3383
3384	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3385				reason, mgmt_connected);
3386
3387	if (conn->type == ACL_LINK) {
3388		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3389			hci_remove_link_key(hdev, &conn->dst);
3390
3391		hci_update_scan(hdev);
3392	}
3393
3394	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3395	if (params) {
3396		switch (params->auto_connect) {
3397		case HCI_AUTO_CONN_LINK_LOSS:
3398			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3399				break;
3400			fallthrough;
3401
3402		case HCI_AUTO_CONN_DIRECT:
3403		case HCI_AUTO_CONN_ALWAYS:
3404			hci_pend_le_list_del_init(params);
3405			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3406			hci_update_passive_scan(hdev);
3407			break;
3408
3409		default:
3410			break;
3411		}
3412	}
3413
3414	hci_disconn_cfm(conn, ev->reason);
3415
3416	/* Re-enable advertising if necessary, since it might
3417	 * have been disabled by the connection. From the
3418	 * HCI_LE_Set_Advertise_Enable command description in
3419	 * the core specification (v4.0):
3420	 * "The Controller shall continue advertising until the Host
3421	 * issues an LE_Set_Advertise_Enable command with
3422	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3423	 * or until a connection is created or until the Advertising
3424	 * is timed out due to Directed Advertising."
3425	 */
3426	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3427		hdev->cur_adv_instance = conn->adv_instance;
3428		hci_enable_advertising(hdev);
3429	}
3430
3431	hci_conn_del(conn);
3432
3433unlock:
3434	hci_dev_unlock(hdev);
3435}
3436
3437static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3438				  struct sk_buff *skb)
3439{
3440	struct hci_ev_auth_complete *ev = data;
3441	struct hci_conn *conn;
3442
3443	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3444
3445	hci_dev_lock(hdev);
3446
3447	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3448	if (!conn)
3449		goto unlock;
3450
3451	if (!ev->status) {
3452		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3453		set_bit(HCI_CONN_AUTH, &conn->flags);
3454		conn->sec_level = conn->pending_sec_level;
3455	} else {
3456		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3457			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3458
3459		mgmt_auth_failed(conn, ev->status);
3460	}
3461
3462	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3463
3464	if (conn->state == BT_CONFIG) {
3465		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3466			struct hci_cp_set_conn_encrypt cp;
3467			cp.handle  = ev->handle;
3468			cp.encrypt = 0x01;
3469			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3470				     &cp);
3471		} else {
3472			conn->state = BT_CONNECTED;
3473			hci_connect_cfm(conn, ev->status);
3474			hci_conn_drop(conn);
3475		}
3476	} else {
3477		hci_auth_cfm(conn, ev->status);
3478
3479		hci_conn_hold(conn);
3480		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3481		hci_conn_drop(conn);
3482	}
3483
3484	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3485		if (!ev->status) {
3486			struct hci_cp_set_conn_encrypt cp;
3487			cp.handle  = ev->handle;
3488			cp.encrypt = 0x01;
3489			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3490				     &cp);
3491		} else {
3492			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3493			hci_encrypt_cfm(conn, ev->status);
3494		}
3495	}
3496
3497unlock:
3498	hci_dev_unlock(hdev);
3499}
3500
3501static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3502				struct sk_buff *skb)
3503{
3504	struct hci_ev_remote_name *ev = data;
3505	struct hci_conn *conn;
3506
3507	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3508
3509	hci_dev_lock(hdev);
3510
3511	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3512
3513	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3514		goto check_auth;
3515
3516	if (ev->status == 0)
3517		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3518				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3519	else
3520		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3521
3522check_auth:
3523	if (!conn)
3524		goto unlock;
3525
3526	if (!hci_outgoing_auth_needed(hdev, conn))
3527		goto unlock;
3528
3529	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3530		struct hci_cp_auth_requested cp;
3531
3532		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3533
3534		cp.handle = __cpu_to_le16(conn->handle);
3535		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3536	}
3537
3538unlock:
3539	hci_dev_unlock(hdev);
3540}
3541
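/* HCI_EV_ENCRYPT_CHANGE: update the AUTH/ENCRYPT/FIPS/AES-CCM connection
 * flags, verify the link still meets its security requirements, and on
 * newly encrypted ACL links follow up by reading the encryption key size.
 */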
3542static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3543				   struct sk_buff *skb)
3544{
3545	struct hci_ev_encrypt_change *ev = data;
3546	struct hci_conn *conn;
3547
3548	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3549
3550	hci_dev_lock(hdev);
3551
3552	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3553	if (!conn)
3554		goto unlock;
3555
3556	if (!ev->status) {
3557		if (ev->encrypt) {
3558			/* Encryption implies authentication */
3559			set_bit(HCI_CONN_AUTH, &conn->flags);
3560			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3561			conn->sec_level = conn->pending_sec_level;
3562
3563			/* P-256 authentication key implies FIPS */
3564			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3565				set_bit(HCI_CONN_FIPS, &conn->flags);
3566
3567			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3568			    conn->type == LE_LINK)
3569				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3570		} else {
3571			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3572			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3573		}
3574	}
3575
3576	/* We should disregard the current RPA and generate a new one
3577	 * whenever the encryption procedure fails.
3578	 */
3579	if (ev->status && conn->type == LE_LINK) {
3580		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3581		hci_adv_instances_set_rpa_expired(hdev, true);
3582	}
3583
3584	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3585
3586	/* Check link security requirements are met */
3587	if (!hci_conn_check_link_mode(conn))
3588		ev->status = HCI_ERROR_AUTH_FAILURE;
3589
3590	if (ev->status && conn->state == BT_CONNECTED) {
3591		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3592			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3593
3594		/* Notify upper layers so they can clean up before
3595		 * disconnecting.
3596		 */
3597		hci_encrypt_cfm(conn, ev->status);
3598		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3599		hci_conn_drop(conn);
3600		goto unlock;
3601	}
3602
3603	/* Try reading the encryption key size for encrypted ACL links */
3604	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3605		struct hci_cp_read_enc_key_size cp;
3606
3607		/* Only send HCI_Read_Encryption_Key_Size if the
3608		 * controller really supports it. If it doesn't, assume
3609		 * the default size (16).
3610		 */
3611		if (!read_key_size_capable(hdev)) {
3612			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3613			goto notify;
3614		}
3615
3616		cp.handle = cpu_to_le16(conn->handle);
3617		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3618				 sizeof(cp), &cp)) {
3619			bt_dev_err(hdev, "sending read key size failed");
3620			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3621			goto notify;
3622		}
3623
3624		goto unlock;
3625	}
3626
3627	/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851-based controllers
3628	 * to avoid unexpected SMP command errors when pairing.
3629	 */
3630	if (test_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
3631		     &hdev->quirks))
3632		goto notify;
3633
3634	/* Set the default Authenticated Payload Timeout after
3635	 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B
3636	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3637	 * sent when the link is active and encryption is enabled. The conn
3638	 * type can be either LE or ACL, and the controller must support LMP
3639	 * Ping. This is also ensured for AES-CCM encryption.
3640	 */
3641	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3642	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3643	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3644	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3645		struct hci_cp_write_auth_payload_to cp;
3646
3647		cp.handle = cpu_to_le16(conn->handle);
3648		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3649		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3650				 sizeof(cp), &cp))
3651			bt_dev_err(hdev, "write auth payload timeout failed");
3652	}
3653
3654notify:
3655	hci_encrypt_cfm(conn, ev->status);
3656
3657unlock:
3658	hci_dev_unlock(hdev);
3659}
3660
3661static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3662					     struct sk_buff *skb)
3663{
3664	struct hci_ev_change_link_key_complete *ev = data;
3665	struct hci_conn *conn;
3666
3667	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3668
3669	hci_dev_lock(hdev);
3670
3671	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3672	if (conn) {
3673		if (!ev->status)
3674			set_bit(HCI_CONN_SECURE, &conn->flags);
3675
3676		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3677
3678		hci_key_change_cfm(conn, ev->status);
3679	}
3680
3681	hci_dev_unlock(hdev);
3682}
3683
3684static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3685				    struct sk_buff *skb)
3686{
3687	struct hci_ev_remote_features *ev = data;
3688	struct hci_conn *conn;
3689
3690	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3691
3692	hci_dev_lock(hdev);
3693
3694	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3695	if (!conn)
3696		goto unlock;
3697
3698	if (!ev->status)
3699		memcpy(conn->features[0], ev->features, 8);
3700
3701	if (conn->state != BT_CONFIG)
3702		goto unlock;
3703
3704	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3705	    lmp_ext_feat_capable(conn)) {
3706		struct hci_cp_read_remote_ext_features cp;
3707		cp.handle = ev->handle;
3708		cp.page = 0x01;
3709		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3710			     sizeof(cp), &cp);
3711		goto unlock;
3712	}
3713
3714	if (!ev->status) {
3715		struct hci_cp_remote_name_req cp;
3716		memset(&cp, 0, sizeof(cp));
3717		bacpy(&cp.bdaddr, &conn->dst);
3718		cp.pscan_rep_mode = 0x02;
3719		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3720	} else {
3721		mgmt_device_connected(hdev, conn, NULL, 0);
3722	}
3723
3724	if (!hci_outgoing_auth_needed(hdev, conn)) {
3725		conn->state = BT_CONNECTED;
3726		hci_connect_cfm(conn, ev->status);
3727		hci_conn_drop(conn);
3728	}
3729
3730unlock:
3731	hci_dev_unlock(hdev);
3732}
3733
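/* Shared by the Command Complete and Command Status handlers: based on
 * the Num_HCI_Command_Packets value (ncmd) reported by the controller,
 * either allow the next queued command or arm the ncmd watchdog timer.
 */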
3734static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3735{
3736	cancel_delayed_work(&hdev->cmd_timer);
3737
3738	rcu_read_lock();
3739	if (!test_bit(HCI_RESET, &hdev->flags)) {
3740		if (ncmd) {
3741			cancel_delayed_work(&hdev->ncmd_timer);
3742			atomic_set(&hdev->cmd_cnt, 1);
3743		} else {
3744			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3745				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3746						   HCI_NCMD_TIMEOUT);
3747		}
3748	}
3749	rcu_read_unlock();
3750}
3751
3752static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3753					struct sk_buff *skb)
3754{
3755	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3756
3757	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3758
3759	if (rp->status)
3760		return rp->status;
3761
3762	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3763	hdev->le_pkts  = rp->acl_max_pkt;
3764	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3765	hdev->iso_pkts = rp->iso_max_pkt;
3766
3767	hdev->le_cnt  = hdev->le_pkts;
3768	hdev->iso_cnt = hdev->iso_pkts;
3769
3770	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3771	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3772
3773	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3774		return HCI_ERROR_INVALID_PARAMETERS;
3775
3776	return rp->status;
3777}
3778
3779static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3780{
3781	struct hci_conn *conn, *tmp;
3782
3783	lockdep_assert_held(&hdev->lock);
3784
3785	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3786		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3787		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3788			continue;
3789
3790		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3791			hci_conn_failed(conn, status);
3792	}
3793}
3794
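/* Command complete handler for LE Set CIG Parameters: validate the
 * response against the issued command, fail only the unbound CISes on
 * error (per the spec an existing CIG configuration is left untouched),
 * otherwise assign the returned connection handles in command order.
 */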
3795static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3796				   struct sk_buff *skb)
3797{
3798	struct hci_rp_le_set_cig_params *rp = data;
3799	struct hci_cp_le_set_cig_params *cp;
3800	struct hci_conn *conn;
3801	u8 status = rp->status;
3802	bool pending = false;
3803	int i;
3804
3805	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3806
3807	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3808	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3809			    rp->cig_id != cp->cig_id)) {
3810		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3811		status = HCI_ERROR_UNSPECIFIED;
3812	}
3813
3814	hci_dev_lock(hdev);
3815
3816	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3817	 *
3818	 * If the Status return parameter is non-zero, then the state of the CIG
3819	 * and its CIS configurations shall not be changed by the command. If
3820	 * the CIG did not already exist, it shall not be created.
3821	 */
3822	if (status) {
3823		/* Keep current configuration, fail only the unbound CIS */
3824		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3825		goto unlock;
3826	}
3827
3828	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3829	 *
3830	 * If the Status return parameter is zero, then the Controller shall
3831	 * set the Connection_Handle arrayed return parameter to the connection
3832	 * handle(s) corresponding to the CIS configurations specified in
3833	 * the CIS_IDs command parameter, in the same order.
3834	 */
3835	for (i = 0; i < rp->num_handles; ++i) {
3836		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3837						cp->cis[i].cis_id);
3838		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3839			continue;
3840
3841		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3842			continue;
3843
3844		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3845			continue;
3846
3847		if (conn->state == BT_CONNECT)
3848			pending = true;
3849	}
3850
3851unlock:
3852	if (pending)
3853		hci_le_create_cis_pending(hdev);
3854
3855	hci_dev_unlock(hdev);
3856
3857	return rp->status;
3858}
3859
3860static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3861				   struct sk_buff *skb)
3862{
3863	struct hci_rp_le_setup_iso_path *rp = data;
3864	struct hci_cp_le_setup_iso_path *cp;
3865	struct hci_conn *conn;
3866
3867	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3868
3869	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3870	if (!cp)
3871		return rp->status;
3872
3873	hci_dev_lock(hdev);
3874
3875	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3876	if (!conn)
3877		goto unlock;
3878
3879	if (rp->status) {
3880		hci_connect_cfm(conn, rp->status);
3881		hci_conn_del(conn);
3882		goto unlock;
3883	}
3884
3885	switch (cp->direction) {
3886	/* Input (Host to Controller) */
3887	case 0x00:
3888		/* Only confirm connection if output only */
3889		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3890			hci_connect_cfm(conn, rp->status);
3891		break;
3892	/* Output (Controller to Host) */
3893	case 0x01:
3894		/* Confirm connection since conn->iso_qos is always configured
3895		 * last.
3896		 */
3897		hci_connect_cfm(conn, rp->status);
3898
3899		/* Notify device connected in case it is a BIG Sync */
3900		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3901			mgmt_device_connected(hdev, conn, NULL, 0);
3902
3903		break;
3904	}
3905
3906unlock:
3907	hci_dev_unlock(hdev);
3908	return rp->status;
3909}
3910
3911static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3912{
3913	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3914}
3915
3916static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3917				   struct sk_buff *skb)
3918{
3919	struct hci_ev_status *rp = data;
3920	struct hci_cp_le_set_per_adv_params *cp;
3921
3922	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3923
3924	if (rp->status)
3925		return rp->status;
3926
3927	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3928	if (!cp)
3929		return rp->status;
3930
3931	/* TODO: set the conn state */
3932	return rp->status;
3933}
3934
3935static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3936				       struct sk_buff *skb)
3937{
3938	struct hci_ev_status *rp = data;
3939	struct hci_cp_le_set_per_adv_enable *cp;
3940	struct adv_info *adv = NULL, *n;
3941	u8 per_adv_cnt = 0;
3942
3943	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3944
3945	if (rp->status)
3946		return rp->status;
3947
3948	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3949	if (!cp)
3950		return rp->status;
3951
3952	hci_dev_lock(hdev);
3953
3954	adv = hci_find_adv_instance(hdev, cp->handle);
3955
3956	if (cp->enable) {
3957		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3958
3959		if (adv)
3960			adv->enabled = true;
3961	} else {
3962		/* If just one instance was disabled, check if there is
3963		 * any other instance enabled before clearing HCI_LE_PER_ADV.
3964		 * The current periodic adv instance will be marked as
3965		 * disabled once extended advertising is also disabled.
3966		 */
3967		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3968					 list) {
3969			if (adv->periodic && adv->enabled)
3970				per_adv_cnt++;
3971		}
3972
3973		if (per_adv_cnt > 1)
3974			goto unlock;
3975
3976		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3977	}
3978
3979unlock:
3980	hci_dev_unlock(hdev);
3981
3982	return rp->status;
3983}
3984
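/* Table-driven dispatch for Command Complete events: each entry binds an
 * opcode to its handler together with the minimum and maximum response
 * length the handler accepts. For example,
 * HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset) expands to an entry whose
 * min_len and max_len are both sizeof(struct hci_ev_status).
 */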
3985#define HCI_CC_VL(_op, _func, _min, _max) \
3986{ \
3987	.op = _op, \
3988	.func = _func, \
3989	.min_len = _min, \
3990	.max_len = _max, \
3991}
3992
3993#define HCI_CC(_op, _func, _len) \
3994	HCI_CC_VL(_op, _func, _len, _len)
3995
3996#define HCI_CC_STATUS(_op, _func) \
3997	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3998
3999static const struct hci_cc {
4000	u16  op;
4001	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4002	u16  min_len;
4003	u16  max_len;
4004} hci_cc_table[] = {
4005	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4006	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4007	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4008	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4009		      hci_cc_remote_name_req_cancel),
4010	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4011	       sizeof(struct hci_rp_role_discovery)),
4012	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4013	       sizeof(struct hci_rp_read_link_policy)),
4014	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4015	       sizeof(struct hci_rp_write_link_policy)),
4016	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4017	       sizeof(struct hci_rp_read_def_link_policy)),
4018	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4019		      hci_cc_write_def_link_policy),
4020	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4021	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4022	       sizeof(struct hci_rp_read_stored_link_key)),
4023	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4024	       sizeof(struct hci_rp_delete_stored_link_key)),
4025	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4026	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4027	       sizeof(struct hci_rp_read_local_name)),
4028	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4029	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4030	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4031	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4032	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4033	       sizeof(struct hci_rp_read_class_of_dev)),
4034	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4035	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4036	       sizeof(struct hci_rp_read_voice_setting)),
4037	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4038	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4039	       sizeof(struct hci_rp_read_num_supported_iac)),
4040	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4041	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4042	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4043	       sizeof(struct hci_rp_read_auth_payload_to)),
4044	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4045	       sizeof(struct hci_rp_write_auth_payload_to)),
4046	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4047	       sizeof(struct hci_rp_read_local_version)),
4048	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4049	       sizeof(struct hci_rp_read_local_commands)),
4050	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4051	       sizeof(struct hci_rp_read_local_features)),
4052	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4053	       sizeof(struct hci_rp_read_local_ext_features)),
4054	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4055	       sizeof(struct hci_rp_read_buffer_size)),
4056	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4057	       sizeof(struct hci_rp_read_bd_addr)),
4058	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4059	       sizeof(struct hci_rp_read_local_pairing_opts)),
4060	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4061	       sizeof(struct hci_rp_read_page_scan_activity)),
4062	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4063		      hci_cc_write_page_scan_activity),
4064	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4065	       sizeof(struct hci_rp_read_page_scan_type)),
4066	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4067	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4068	       sizeof(struct hci_rp_read_clock)),
4069	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4070	       sizeof(struct hci_rp_read_enc_key_size)),
4071	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4072	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4073	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4074	       hci_cc_read_def_err_data_reporting,
4075	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4076	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4077		      hci_cc_write_def_err_data_reporting),
4078	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4079	       sizeof(struct hci_rp_pin_code_reply)),
4080	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4081	       sizeof(struct hci_rp_pin_code_neg_reply)),
4082	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4083	       sizeof(struct hci_rp_read_local_oob_data)),
4084	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4085	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4086	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4087	       sizeof(struct hci_rp_le_read_buffer_size)),
4088	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4089	       sizeof(struct hci_rp_le_read_local_features)),
4090	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4091	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4092	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4093	       sizeof(struct hci_rp_user_confirm_reply)),
4094	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4095	       sizeof(struct hci_rp_user_confirm_reply)),
4096	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4097	       sizeof(struct hci_rp_user_confirm_reply)),
4098	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4099	       sizeof(struct hci_rp_user_confirm_reply)),
4100	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4101	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4102	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4103	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4104	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4105	       hci_cc_le_read_accept_list_size,
4106	       sizeof(struct hci_rp_le_read_accept_list_size)),
4107	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4108	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4109		      hci_cc_le_add_to_accept_list),
4110	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4111		      hci_cc_le_del_from_accept_list),
4112	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4113	       sizeof(struct hci_rp_le_read_supported_states)),
4114	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4115	       sizeof(struct hci_rp_le_read_def_data_len)),
4116	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4117		      hci_cc_le_write_def_data_len),
4118	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4119		      hci_cc_le_add_to_resolv_list),
4120	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4121		      hci_cc_le_del_from_resolv_list),
4122	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4123		      hci_cc_le_clear_resolv_list),
4124	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4125	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4126	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4127		      hci_cc_le_set_addr_resolution_enable),
4128	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4129	       sizeof(struct hci_rp_le_read_max_data_len)),
4130	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4131		      hci_cc_write_le_host_supported),
4132	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4133	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4134	       sizeof(struct hci_rp_read_rssi)),
4135	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4136	       sizeof(struct hci_rp_read_tx_power)),
4137	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4138	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4139		      hci_cc_le_set_ext_scan_param),
4140	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4141		      hci_cc_le_set_ext_scan_enable),
4142	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4143	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4144	       hci_cc_le_read_num_adv_sets,
4145	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4146	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4147	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4148	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4149		      hci_cc_le_set_ext_adv_enable),
4150	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4151		      hci_cc_le_set_adv_set_random_addr),
4152	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4153	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4154	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4155	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4156		      hci_cc_le_set_per_adv_enable),
4157	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4158	       sizeof(struct hci_rp_le_read_transmit_power)),
4159	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4160	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4161	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4162	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4163		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4164	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4165	       sizeof(struct hci_rp_le_setup_iso_path)),
4166};
4167
4168static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4169		      struct sk_buff *skb)
4170{
4171	void *data;
4172
4173	if (skb->len < cc->min_len) {
4174		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4175			   cc->op, skb->len, cc->min_len);
4176		return HCI_ERROR_UNSPECIFIED;
4177	}
4178
4179	/* Just warn if the length is over max_len, since it may still be
4180	 * possible to partially parse the cc; leave it to the callback to
4181	 * decide whether that is acceptable.
4182	 */
4183	if (skb->len > cc->max_len)
4184		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4185			    cc->op, skb->len, cc->max_len);
4186
4187	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4188	if (!data)
4189		return HCI_ERROR_UNSPECIFIED;
4190
4191	return cc->func(hdev, data, skb);
4192}
4193
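/* HCI_EV_CMD_COMPLETE: look the opcode up in hci_cc_table and run its
 * handler; unknown (typically vendor-specific) opcodes fall back to
 * treating byte 0 of the response as the status.
 */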
4194static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4195				 struct sk_buff *skb, u16 *opcode, u8 *status,
4196				 hci_req_complete_t *req_complete,
4197				 hci_req_complete_skb_t *req_complete_skb)
4198{
4199	struct hci_ev_cmd_complete *ev = data;
4200	int i;
4201
4202	*opcode = __le16_to_cpu(ev->opcode);
4203
4204	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4205
4206	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4207		if (hci_cc_table[i].op == *opcode) {
4208			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4209			break;
4210		}
4211	}
4212
4213	if (i == ARRAY_SIZE(hci_cc_table)) {
4214		/* Unknown opcode, assume byte 0 contains the status, so
4215		 * that e.g. __hci_cmd_sync() properly returns errors
4216		 * for vendor-specific commands sent by HCI drivers.
4217		 * If a vendor doesn't actually follow this convention, we may
4218		 * need to introduce a vendor CC table in order to properly set
4219		 * the status.
4220		 */
4221		*status = skb->data[0];
4222	}
4223
4224	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4225
4226	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4227			     req_complete_skb);
4228
4229	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4230		bt_dev_err(hdev,
4231			   "unexpected event for opcode 0x%4.4x", *opcode);
4232		return;
4233	}
4234
4235	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4236		queue_work(hdev->workqueue, &hdev->cmd_work);
4237}
4238
4239static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4240{
4241	struct hci_cp_le_create_cis *cp;
4242	bool pending = false;
4243	int i;
4244
4245	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4246
4247	if (!status)
4248		return;
4249
4250	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4251	if (!cp)
4252		return;
4253
4254	hci_dev_lock(hdev);
4255
4256	/* Remove connection if command failed */
4257	for (i = 0; i < cp->num_cis; i++) {
4258		struct hci_conn *conn;
4259		u16 handle;
4260
4261		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4262
4263		conn = hci_conn_hash_lookup_handle(hdev, handle);
4264		if (conn) {
4265			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4266					       &conn->flags))
4267				pending = true;
4268			conn->state = BT_CLOSED;
4269			hci_connect_cfm(conn, status);
4270			hci_conn_del(conn);
4271		}
4272	}
4273	cp->num_cis = 0;
4274
4275	if (pending)
4276		hci_le_create_cis_pending(hdev);
4277
4278	hci_dev_unlock(hdev);
4279}
4280
4281#define HCI_CS(_op, _func) \
4282{ \
4283	.op = _op, \
4284	.func = _func, \
4285}
4286
4287static const struct hci_cs {
4288	u16  op;
4289	void (*func)(struct hci_dev *hdev, __u8 status);
4290} hci_cs_table[] = {
4291	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4292	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4293	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4294	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4295	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4296	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4297	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4298	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4299	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4300	       hci_cs_read_remote_ext_features),
4301	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4302	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4303	       hci_cs_enhanced_setup_sync_conn),
4304	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4305	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4306	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4307	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4308	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4309	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4310	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4311	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4312	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4313};
4314
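/* HCI_EV_CMD_STATUS: dispatch through hci_cs_table. Unlike Command
 * Complete, a successful status may still be followed by a dedicated
 * event, so the request is only flagged complete early on failure or
 * when no specific event is awaited.
 */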
4315static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4316			       struct sk_buff *skb, u16 *opcode, u8 *status,
4317			       hci_req_complete_t *req_complete,
4318			       hci_req_complete_skb_t *req_complete_skb)
4319{
4320	struct hci_ev_cmd_status *ev = data;
4321	int i;
4322
4323	*opcode = __le16_to_cpu(ev->opcode);
4324	*status = ev->status;
4325
4326	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4327
4328	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4329		if (hci_cs_table[i].op == *opcode) {
4330			hci_cs_table[i].func(hdev, ev->status);
4331			break;
4332		}
4333	}
4334
4335	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4336
4337	/* Indicate request completion if the command failed. Also, if
4338	 * we're not waiting for a special event and we get a success
4339	 * command status, we should try to flag the request as completed
4340	 * (since for this kind of command there will not be a command
4341	 * complete event).
4342	 */
4343	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4344		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4345				     req_complete_skb);
4346		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4347			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4348				   *opcode);
4349			return;
4350		}
4351	}
4352
4353	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4354		queue_work(hdev->workqueue, &hdev->cmd_work);
4355}
4356
4357static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4358				   struct sk_buff *skb)
4359{
4360	struct hci_ev_hardware_error *ev = data;
4361
4362	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4363
4364	hdev->hw_error_code = ev->code;
4365
4366	queue_work(hdev->req_workqueue, &hdev->error_reset);
4367}
4368
4369static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4370				struct sk_buff *skb)
4371{
4372	struct hci_ev_role_change *ev = data;
4373	struct hci_conn *conn;
4374
4375	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4376
4377	hci_dev_lock(hdev);
4378
4379	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4380	if (conn) {
4381		if (!ev->status)
4382			conn->role = ev->role;
4383
4384		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4385
4386		hci_role_switch_cfm(conn, ev->status, ev->role);
4387	}
4388
4389	hci_dev_unlock(hdev);
4390}
4391
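/* HCI_Number_Of_Completed_Packets is the controller's flow-control
 * feedback: for each connection handle it reports how many of the
 * queued packets have been completed, so the matching number of
 * credits is returned to the per-link-type quota (acl_cnt, le_cnt,
 * sco_cnt or iso_cnt) and the TX work is kicked to send more data.
 * The counters are clamped to the totals reported by the controller
 * so a misbehaving controller cannot inflate the available credits.
 */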
4392static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4393				  struct sk_buff *skb)
4394{
4395	struct hci_ev_num_comp_pkts *ev = data;
4396	int i;
4397
4398	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4399			     flex_array_size(ev, handles, ev->num)))
4400		return;
4401
4402	bt_dev_dbg(hdev, "num %d", ev->num);
4403
4404	for (i = 0; i < ev->num; i++) {
4405		struct hci_comp_pkts_info *info = &ev->handles[i];
4406		struct hci_conn *conn;
4407		__u16  handle, count;
4408
4409		handle = __le16_to_cpu(info->handle);
4410		count  = __le16_to_cpu(info->count);
4411
4412		conn = hci_conn_hash_lookup_handle(hdev, handle);
4413		if (!conn)
4414			continue;
4415
4416		conn->sent -= count;
4417
4418		switch (conn->type) {
4419		case ACL_LINK:
4420			hdev->acl_cnt += count;
4421			if (hdev->acl_cnt > hdev->acl_pkts)
4422				hdev->acl_cnt = hdev->acl_pkts;
4423			break;
4424
4425		case LE_LINK:
4426			if (hdev->le_pkts) {
4427				hdev->le_cnt += count;
4428				if (hdev->le_cnt > hdev->le_pkts)
4429					hdev->le_cnt = hdev->le_pkts;
4430			} else {
4431				hdev->acl_cnt += count;
4432				if (hdev->acl_cnt > hdev->acl_pkts)
4433					hdev->acl_cnt = hdev->acl_pkts;
4434			}
4435			break;
4436
4437		case SCO_LINK:
4438			hdev->sco_cnt += count;
4439			if (hdev->sco_cnt > hdev->sco_pkts)
4440				hdev->sco_cnt = hdev->sco_pkts;
4441			break;
4442
4443		case ISO_LINK:
4444			if (hdev->iso_pkts) {
4445				hdev->iso_cnt += count;
4446				if (hdev->iso_cnt > hdev->iso_pkts)
4447					hdev->iso_cnt = hdev->iso_pkts;
4448			} else if (hdev->le_pkts) {
4449				hdev->le_cnt += count;
4450				if (hdev->le_cnt > hdev->le_pkts)
4451					hdev->le_cnt = hdev->le_pkts;
4452			} else {
4453				hdev->acl_cnt += count;
4454				if (hdev->acl_cnt > hdev->acl_pkts)
4455					hdev->acl_cnt = hdev->acl_pkts;
4456			}
4457			break;
4458
4459		default:
4460			bt_dev_err(hdev, "unknown type %d conn %p",
4461				   conn->type, conn);
4462			break;
4463		}
4464	}
4465
4466	queue_work(hdev->workqueue, &hdev->tx_work);
4467}
4468
4469static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4470				struct sk_buff *skb)
4471{
4472	struct hci_ev_mode_change *ev = data;
4473	struct hci_conn *conn;
4474
4475	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4476
4477	hci_dev_lock(hdev);
4478
4479	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4480	if (conn) {
4481		conn->mode = ev->mode;
4482
4483		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4484					&conn->flags)) {
4485			if (conn->mode == HCI_CM_ACTIVE)
4486				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4487			else
4488				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4489		}
4490
4491		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4492			hci_sco_setup(conn, ev->status);
4493	}
4494
4495	hci_dev_unlock(hdev);
4496}
4497
4498static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4499				     struct sk_buff *skb)
4500{
4501	struct hci_ev_pin_code_req *ev = data;
4502	struct hci_conn *conn;
4503
4504	bt_dev_dbg(hdev, "");
4505
4506	hci_dev_lock(hdev);
4507
4508	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4509	if (!conn)
4510		goto unlock;
4511
4512	if (conn->state == BT_CONNECTED) {
4513		hci_conn_hold(conn);
4514		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4515		hci_conn_drop(conn);
4516	}
4517
4518	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4519	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4520		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4521			     sizeof(ev->bdaddr), &ev->bdaddr);
4522	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4523		u8 secure;
4524
4525		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4526			secure = 1;
4527		else
4528			secure = 0;
4529
4530		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4531	}
4532
4533unlock:
4534	hci_dev_unlock(hdev);
4535}
4536
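/* Map a BR/EDR link key type to the security level that the key can
 * satisfy: an authenticated key yields high (P-192) or FIPS (P-256)
 * security, a combination key derived from a 16-digit PIN yields high
 * security, and anything unauthenticated only medium. Changed
 * combination keys keep the previously stored type and length.
 */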
4537static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4538{
4539	if (key_type == HCI_LK_CHANGED_COMBINATION)
4540		return;
4541
4542	conn->pin_length = pin_len;
4543	conn->key_type = key_type;
4544
4545	switch (key_type) {
4546	case HCI_LK_LOCAL_UNIT:
4547	case HCI_LK_REMOTE_UNIT:
4548	case HCI_LK_DEBUG_COMBINATION:
4549		return;
4550	case HCI_LK_COMBINATION:
4551		if (pin_len == 16)
4552			conn->pending_sec_level = BT_SECURITY_HIGH;
4553		else
4554			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4555		break;
4556	case HCI_LK_UNAUTH_COMBINATION_P192:
4557	case HCI_LK_UNAUTH_COMBINATION_P256:
4558		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4559		break;
4560	case HCI_LK_AUTH_COMBINATION_P192:
4561		conn->pending_sec_level = BT_SECURITY_HIGH;
4562		break;
4563	case HCI_LK_AUTH_COMBINATION_P256:
4564		conn->pending_sec_level = BT_SECURITY_FIPS;
4565		break;
4566	}
4567}
4568
4569static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4570				     struct sk_buff *skb)
4571{
4572	struct hci_ev_link_key_req *ev = data;
4573	struct hci_cp_link_key_reply cp;
4574	struct hci_conn *conn;
4575	struct link_key *key;
4576
4577	bt_dev_dbg(hdev, "");
4578
4579	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4580		return;
4581
4582	hci_dev_lock(hdev);
4583
4584	key = hci_find_link_key(hdev, &ev->bdaddr);
4585	if (!key) {
4586		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4587		goto not_found;
4588	}
4589
4590	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4591
4592	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4593	if (conn) {
4594		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4595
4596		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4597		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4598		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4599			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4600			goto not_found;
4601		}
4602
4603		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4604		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4605		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4606			bt_dev_dbg(hdev, "ignoring unauthenticated key for high security");
4607			goto not_found;
4608		}
4609
4610		conn_set_key(conn, key->type, key->pin_len);
4611	}
4612
4613	bacpy(&cp.bdaddr, &ev->bdaddr);
4614	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4615
4616	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4617
4618	hci_dev_unlock(hdev);
4619
4620	return;
4621
4622not_found:
4623	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4624	hci_dev_unlock(hdev);
4625}
4626
4627static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4628				    struct sk_buff *skb)
4629{
4630	struct hci_ev_link_key_notify *ev = data;
4631	struct hci_conn *conn;
4632	struct link_key *key;
4633	bool persistent;
4634	u8 pin_len = 0;
4635
4636	bt_dev_dbg(hdev, "");
4637
4638	hci_dev_lock(hdev);
4639
4640	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4641	if (!conn)
4642		goto unlock;
4643
4644	/* Ignore NULL link key to guard against CVE-2020-26555 */
4645	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4646		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4647			   &ev->bdaddr);
4648		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4649		hci_conn_drop(conn);
4650		goto unlock;
4651	}
4652
4653	hci_conn_hold(conn);
4654	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4655	hci_conn_drop(conn);
4656
4657	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4658	conn_set_key(conn, ev->key_type, conn->pin_length);
4659
4660	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4661		goto unlock;
4662
4663	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4664			        ev->key_type, pin_len, &persistent);
4665	if (!key)
4666		goto unlock;
4667
4668	/* Update connection information since adding the key will have
4669	 * fixed up the type in the case of changed combination keys.
4670	 */
4671	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4672		conn_set_key(conn, key->type, key->pin_len);
4673
4674	mgmt_new_link_key(hdev, key, persistent);
4675
4676	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4677	 * is set. If it's not set simply remove the key from the kernel
4678	 * list (we've still notified user space about it but with
4679	 * store_hint being 0).
4680	 */
4681	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4682	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4683		list_del_rcu(&key->list);
4684		kfree_rcu(key, rcu);
4685		goto unlock;
4686	}
4687
4688	if (persistent)
4689		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4690	else
4691		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4692
4693unlock:
4694	hci_dev_unlock(hdev);
4695}
4696
4697static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4698				 struct sk_buff *skb)
4699{
4700	struct hci_ev_clock_offset *ev = data;
4701	struct hci_conn *conn;
4702
4703	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4704
4705	hci_dev_lock(hdev);
4706
4707	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4708	if (conn && !ev->status) {
4709		struct inquiry_entry *ie;
4710
4711		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4712		if (ie) {
4713			ie->data.clock_offset = ev->clock_offset;
4714			ie->timestamp = jiffies;
4715		}
4716	}
4717
4718	hci_dev_unlock(hdev);
4719}
4720
4721static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4722				    struct sk_buff *skb)
4723{
4724	struct hci_ev_pkt_type_change *ev = data;
4725	struct hci_conn *conn;
4726
4727	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4728
4729	hci_dev_lock(hdev);
4730
4731	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4732	if (conn && !ev->status)
4733		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4734
4735	hci_dev_unlock(hdev);
4736}
4737
4738static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4739				   struct sk_buff *skb)
4740{
4741	struct hci_ev_pscan_rep_mode *ev = data;
4742	struct inquiry_entry *ie;
4743
4744	bt_dev_dbg(hdev, "");
4745
4746	hci_dev_lock(hdev);
4747
4748	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4749	if (ie) {
4750		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4751		ie->timestamp = jiffies;
4752	}
4753
4754	hci_dev_unlock(hdev);
4755}
4756
4757static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4758					     struct sk_buff *skb)
4759{
4760	struct hci_ev_inquiry_result_rssi *ev = edata;
4761	struct inquiry_data data;
4762	int i;
4763
4764	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4765
4766	if (!ev->num)
4767		return;
4768
4769	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4770		return;
4771
4772	hci_dev_lock(hdev);
4773
4774	if (skb->len == array_size(ev->num,
4775				   sizeof(struct inquiry_info_rssi_pscan))) {
4776		struct inquiry_info_rssi_pscan *info;
4777
4778		for (i = 0; i < ev->num; i++) {
4779			u32 flags;
4780
4781			info = hci_ev_skb_pull(hdev, skb,
4782					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4783					       sizeof(*info));
4784			if (!info) {
4785				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4786					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4787				goto unlock;
4788			}
4789
4790			bacpy(&data.bdaddr, &info->bdaddr);
4791			data.pscan_rep_mode	= info->pscan_rep_mode;
4792			data.pscan_period_mode	= info->pscan_period_mode;
4793			data.pscan_mode		= info->pscan_mode;
4794			memcpy(data.dev_class, info->dev_class, 3);
4795			data.clock_offset	= info->clock_offset;
4796			data.rssi		= info->rssi;
4797			data.ssp_mode		= 0x00;
4798
4799			flags = hci_inquiry_cache_update(hdev, &data, false);
4800
4801			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4802					  info->dev_class, info->rssi,
4803					  flags, NULL, 0, NULL, 0, 0);
4804		}
4805	} else if (skb->len == array_size(ev->num,
4806					  sizeof(struct inquiry_info_rssi))) {
4807		struct inquiry_info_rssi *info;
4808
4809		for (i = 0; i < ev->num; i++) {
4810			u32 flags;
4811
4812			info = hci_ev_skb_pull(hdev, skb,
4813					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4814					       sizeof(*info));
4815			if (!info) {
4816				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4817					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4818				goto unlock;
4819			}
4820
4821			bacpy(&data.bdaddr, &info->bdaddr);
4822			data.pscan_rep_mode	= info->pscan_rep_mode;
4823			data.pscan_period_mode	= info->pscan_period_mode;
4824			data.pscan_mode		= 0x00;
4825			memcpy(data.dev_class, info->dev_class, 3);
4826			data.clock_offset	= info->clock_offset;
4827			data.rssi		= info->rssi;
4828			data.ssp_mode		= 0x00;
4829
4830			flags = hci_inquiry_cache_update(hdev, &data, false);
4831
4832			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4833					  info->dev_class, info->rssi,
4834					  flags, NULL, 0, NULL, 0, 0);
4835		}
4836	} else {
4837		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4838			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4839	}
4840unlock:
4841	hci_dev_unlock(hdev);
4842}
4843
4844static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4845					struct sk_buff *skb)
4846{
4847	struct hci_ev_remote_ext_features *ev = data;
4848	struct hci_conn *conn;
4849
4850	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4851
4852	hci_dev_lock(hdev);
4853
4854	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4855	if (!conn)
4856		goto unlock;
4857
4858	if (ev->page < HCI_MAX_PAGES)
4859		memcpy(conn->features[ev->page], ev->features, 8);
4860
4861	if (!ev->status && ev->page == 0x01) {
4862		struct inquiry_entry *ie;
4863
4864		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4865		if (ie)
4866			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4867
4868		if (ev->features[0] & LMP_HOST_SSP) {
4869			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4870		} else {
4871			/* It is mandatory by the Bluetooth specification that
4872			 * Extended Inquiry Results are only used when Secure
4873			 * Simple Pairing is enabled, but some devices violate
4874			 * this.
4875			 *
4876			 * To make these devices work, the internal SSP
4877			 * enabled flag needs to be cleared if the remote host
4878			 * features do not indicate SSP support */
4879			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4880		}
4881
4882		if (ev->features[0] & LMP_HOST_SC)
4883			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4884	}
4885
4886	if (conn->state != BT_CONFIG)
4887		goto unlock;
4888
4889	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4890		struct hci_cp_remote_name_req cp;
4891		memset(&cp, 0, sizeof(cp));
4892		bacpy(&cp.bdaddr, &conn->dst);
4893		cp.pscan_rep_mode = 0x02;
4894		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4895	} else {
4896		mgmt_device_connected(hdev, conn, NULL, 0);
4897	}
4898
4899	if (!hci_outgoing_auth_needed(hdev, conn)) {
4900		conn->state = BT_CONNECTED;
4901		hci_connect_cfm(conn, ev->status);
4902		hci_conn_drop(conn);
4903	}
4904
4905unlock:
4906	hci_dev_unlock(hdev);
4907}
4908
4909static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4910				       struct sk_buff *skb)
4911{
4912	struct hci_ev_sync_conn_complete *ev = data;
4913	struct hci_conn *conn;
4914	u8 status = ev->status;
4915
4916	switch (ev->link_type) {
4917	case SCO_LINK:
4918	case ESCO_LINK:
4919		break;
4920	default:
4921		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4922		 * for HCI_Synchronous_Connection_Complete is limited to
4923		 * either SCO or eSCO
4924		 */
4925		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4926		return;
4927	}
4928
4929	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4930
4931	hci_dev_lock(hdev);
4932
4933	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4934	if (!conn) {
4935		if (ev->link_type == ESCO_LINK)
4936			goto unlock;
4937
4938		/* When the link type in the event indicates SCO connection
4939		 * and lookup of the connection object fails, then check
4940		 * if an eSCO connection object exists.
4941		 *
4942		 * The core limits the synchronous connections to either
4943		 * SCO or eSCO. The eSCO connection is preferred and set up
4944		 * first; until it is successfully established, the link
4945		 * type is hinted as eSCO.
4946		 */
4947		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4948		if (!conn)
4949			goto unlock;
4950	}
4951
4952	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4953	 * Processing it more than once per connection can corrupt kernel memory.
4954	 *
4955	 * As the connection handle is set here for the first time, it indicates
4956	 * whether the connection is already set up.
4957	 */
4958	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4959		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4960		goto unlock;
4961	}
4962
4963	switch (status) {
4964	case 0x00:
4965		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4966		if (status) {
4967			conn->state = BT_CLOSED;
4968			break;
4969		}
4970
4971		conn->state  = BT_CONNECTED;
4972		conn->type   = ev->link_type;
4973
4974		hci_debugfs_create_conn(conn);
4975		hci_conn_add_sysfs(conn);
4976		break;
4977
4978	case 0x10:	/* Connection Accept Timeout */
4979	case 0x0d:	/* Connection Rejected due to Limited Resources */
4980	case 0x11:	/* Unsupported Feature or Parameter Value */
4981	case 0x1c:	/* SCO interval rejected */
4982	case 0x1a:	/* Unsupported Remote Feature */
4983	case 0x1e:	/* Invalid LMP Parameters */
4984	case 0x1f:	/* Unspecified error */
4985	case 0x20:	/* Unsupported LMP Parameter value */
4986		if (conn->out) {
4987			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4988					(hdev->esco_type & EDR_ESCO_MASK);
4989			if (hci_setup_sync(conn, conn->parent->handle))
4990				goto unlock;
4991		}
4992		fallthrough;
4993
4994	default:
4995		conn->state = BT_CLOSED;
4996		break;
4997	}
4998
4999	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5000	/* Notify only for SCO over the HCI transport data path, which
5001	 * is zero; a non-zero value indicates a non-HCI transport data path.
5002	 */
5003	if (conn->codec.data_path == 0 && hdev->notify) {
5004		switch (ev->air_mode) {
5005		case 0x02:
5006			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5007			break;
5008		case 0x03:
5009			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5010			break;
5011		}
5012	}
5013
5014	hci_connect_cfm(conn, status);
5015	if (status)
5016		hci_conn_del(conn);
5017
5018unlock:
5019	hci_dev_unlock(hdev);
5020}
5021
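/* EIR data is a sequence of length-prefixed fields, e.g.
 *
 *   [len=5][type=0x09 Complete Local Name]['A']['B']['C']['D'][len=0]...
 *
 * where each length byte covers the type byte plus the field data and
 * a zero length terminates the list. Return the number of meaningful
 * bytes so that trailing zero padding is not passed on to user space.
 */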
5022static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5023{
5024	size_t parsed = 0;
5025
5026	while (parsed < eir_len) {
5027		u8 field_len = eir[0];
5028
5029		if (field_len == 0)
5030			return parsed;
5031
5032		parsed += field_len + 1;
5033		eir += field_len + 1;
5034	}
5035
5036	return eir_len;
5037}
5038
5039static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5040					    struct sk_buff *skb)
5041{
5042	struct hci_ev_ext_inquiry_result *ev = edata;
5043	struct inquiry_data data;
5044	size_t eir_len;
5045	int i;
5046
5047	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5048			     flex_array_size(ev, info, ev->num)))
5049		return;
5050
5051	bt_dev_dbg(hdev, "num %d", ev->num);
5052
5053	if (!ev->num)
5054		return;
5055
5056	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5057		return;
5058
5059	hci_dev_lock(hdev);
5060
5061	for (i = 0; i < ev->num; i++) {
5062		struct extended_inquiry_info *info = &ev->info[i];
5063		u32 flags;
5064		bool name_known;
5065
5066		bacpy(&data.bdaddr, &info->bdaddr);
5067		data.pscan_rep_mode	= info->pscan_rep_mode;
5068		data.pscan_period_mode	= info->pscan_period_mode;
5069		data.pscan_mode		= 0x00;
5070		memcpy(data.dev_class, info->dev_class, 3);
5071		data.clock_offset	= info->clock_offset;
5072		data.rssi		= info->rssi;
5073		data.ssp_mode		= 0x01;
5074
5075		if (hci_dev_test_flag(hdev, HCI_MGMT))
5076			name_known = eir_get_data(info->data,
5077						  sizeof(info->data),
5078						  EIR_NAME_COMPLETE, NULL);
5079		else
5080			name_known = true;
5081
5082		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5083
5084		eir_len = eir_get_length(info->data, sizeof(info->data));
5085
5086		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5087				  info->dev_class, info->rssi,
5088				  flags, info->data, eir_len, NULL, 0, 0);
5089	}
5090
5091	hci_dev_unlock(hdev);
5092}
5093
5094static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5095					 struct sk_buff *skb)
5096{
5097	struct hci_ev_key_refresh_complete *ev = data;
5098	struct hci_conn *conn;
5099
5100	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5101		   __le16_to_cpu(ev->handle));
5102
5103	hci_dev_lock(hdev);
5104
5105	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5106	if (!conn)
5107		goto unlock;
5108
5109	/* For BR/EDR the necessary steps are taken through the
5110	 * auth_complete event.
5111	 */
5112	if (conn->type != LE_LINK)
5113		goto unlock;
5114
5115	if (!ev->status)
5116		conn->sec_level = conn->pending_sec_level;
5117
5118	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5119
5120	if (ev->status && conn->state == BT_CONNECTED) {
5121		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5122		hci_conn_drop(conn);
5123		goto unlock;
5124	}
5125
5126	if (conn->state == BT_CONFIG) {
5127		if (!ev->status)
5128			conn->state = BT_CONNECTED;
5129
5130		hci_connect_cfm(conn, ev->status);
5131		hci_conn_drop(conn);
5132	} else {
5133		hci_auth_cfm(conn, ev->status);
5134
5135		hci_conn_hold(conn);
5136		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5137		hci_conn_drop(conn);
5138	}
5139
5140unlock:
5141	hci_dev_unlock(hdev);
5142}
5143
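/* Compute the authentication requirements value for an IO Capability
 * reply. The low bit of the value is the MITM protection flag and the
 * remaining bits encode the bonding type, so for example general
 * bonding (0x04) with MITM required becomes 0x05.
 */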
5144static u8 hci_get_auth_req(struct hci_conn *conn)
5145{
5146	/* If the remote requests no-bonding, follow that lead */
5147	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5148	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5149		return conn->remote_auth | (conn->auth_type & 0x01);
5150
5151	/* If both remote and local have enough IO capabilities, require
5152	 * MITM protection
5153	 */
5154	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5155	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5156		return conn->remote_auth | 0x01;
5157
5158	/* No MITM protection possible so ignore remote requirement */
5159	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5160}
5161
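/* Determine the OOB data present value for an IO Capability reply:
 * 0x00 when no usable OOB data is stored, 0x01 when P-192 values are
 * available and 0x02 when P-256 values are available; with Secure
 * Connections enabled the value stored alongside the OOB data is
 * trusted directly unless Secure Connections Only mode is in force.
 */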
5162static u8 bredr_oob_data_present(struct hci_conn *conn)
5163{
5164	struct hci_dev *hdev = conn->hdev;
5165	struct oob_data *data;
5166
5167	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5168	if (!data)
5169		return 0x00;
5170
5171	if (bredr_sc_enabled(hdev)) {
5172		/* When Secure Connections is enabled, then just
5173		 * return the present value stored with the OOB
5174		 * data. The stored value contains the right present
5175		 * information. However it can only be trusted when
5176		 * not in Secure Connection Only mode.
5177		 */
5178		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5179			return data->present;
5180
5181		/* When Secure Connections Only mode is enabled, then
5182		 * the P-256 values are required. If they are not
5183		 * available, then do not declare that OOB data is
5184		 * present.
5185		 */
5186		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5187		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5188			return 0x00;
5189
5190		return 0x02;
5191	}
5192
5193	/* When Secure Connections is not enabled or not actually
5194	 * supported by the hardware, check whether the
5195	 * P-192 data values are present.
5196	 */
5197	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5198	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5199		return 0x00;
5200
5201	return 0x01;
5202}
5203
5204static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5205				    struct sk_buff *skb)
5206{
5207	struct hci_ev_io_capa_request *ev = data;
5208	struct hci_conn *conn;
5209
5210	bt_dev_dbg(hdev, "");
5211
5212	hci_dev_lock(hdev);
5213
5214	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5215	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5216		goto unlock;
5217
5218	/* Assume remote supports SSP since it has triggered this event */
5219	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5220
5221	hci_conn_hold(conn);
5222
5223	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5224		goto unlock;
5225
5226	/* Allow pairing if we're pairable, if we initiated the
5227	 * pairing, or if the remote is not requesting bonding.
5228	 */
5229	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5230	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5231	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5232		struct hci_cp_io_capability_reply cp;
5233
5234		bacpy(&cp.bdaddr, &ev->bdaddr);
5235		/* Change the IO capability from KeyboardDisplay
5236		 * to DisplayYesNo as it is not supported by the BT spec. */
5237		cp.capability = (conn->io_capability == 0x04) ?
5238				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5239
5240		/* If we are initiators, there is no remote information yet */
5241		if (conn->remote_auth == 0xff) {
5242			/* Request MITM protection if our IO caps allow it
5243			 * except for the no-bonding case.
5244			 */
5245			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5246			    conn->auth_type != HCI_AT_NO_BONDING)
5247				conn->auth_type |= 0x01;
5248		} else {
5249			conn->auth_type = hci_get_auth_req(conn);
5250		}
5251
5252		/* If we're not bondable, force one of the non-bondable
5253		 * authentication requirement values.
5254		 */
5255		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5256			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5257
5258		cp.authentication = conn->auth_type;
5259		cp.oob_data = bredr_oob_data_present(conn);
5260
5261		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5262			     sizeof(cp), &cp);
5263	} else {
5264		struct hci_cp_io_capability_neg_reply cp;
5265
5266		bacpy(&cp.bdaddr, &ev->bdaddr);
5267		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5268
5269		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5270			     sizeof(cp), &cp);
5271	}
5272
5273unlock:
5274	hci_dev_unlock(hdev);
5275}
5276
5277static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5278				  struct sk_buff *skb)
5279{
5280	struct hci_ev_io_capa_reply *ev = data;
5281	struct hci_conn *conn;
5282
5283	bt_dev_dbg(hdev, "");
5284
5285	hci_dev_lock(hdev);
5286
5287	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5288	if (!conn)
5289		goto unlock;
5290
5291	conn->remote_cap = ev->capability;
5292	conn->remote_auth = ev->authentication;
5293
5294unlock:
5295	hci_dev_unlock(hdev);
5296}
5297
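/* HCI_User_Confirmation_Request: decide whether the numeric comparison
 * can be answered automatically (neither side needs MITM protection
 * and no stored link key argues for caution) or whether it must be
 * forwarded to user space via mgmt_user_confirm_request() for an
 * explicit decision.
 */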
5298static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5299					 struct sk_buff *skb)
5300{
5301	struct hci_ev_user_confirm_req *ev = data;
5302	int loc_mitm, rem_mitm, confirm_hint = 0;
5303	struct hci_conn *conn;
5304
5305	bt_dev_dbg(hdev, "");
5306
5307	hci_dev_lock(hdev);
5308
5309	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5310		goto unlock;
5311
5312	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5313	if (!conn)
5314		goto unlock;
5315
5316	loc_mitm = (conn->auth_type & 0x01);
5317	rem_mitm = (conn->remote_auth & 0x01);
5318
5319	/* If we require MITM but the remote device can't provide that
5320	 * (it has NoInputNoOutput) then reject the confirmation
5321	 * request. We check the security level here since it doesn't
5322	 * necessarily match conn->auth_type.
5323	 */
5324	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5325	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5326		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5327		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5328			     sizeof(ev->bdaddr), &ev->bdaddr);
5329		goto unlock;
5330	}
5331
5332	/* If neither side requires MITM protection, use the JUST_CFM method */
5333	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5334	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5335
5336		/* If we're not the initiator of the authentication request and
5337		 * the local IO capability is not NoInputNoOutput, use the
5338		 * JUST_WORKS method (mgmt_user_confirm with confirm_hint set to 1).
5339		 */
5340		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5341		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
5342			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5343			confirm_hint = 1;
5344			goto confirm;
5345		}
5346
5347		/* If there already exists link key in local host, leave the
5348		 * decision to user space since the remote device could be
5349		 * legitimate or malicious.
5350		 */
5351		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5352			bt_dev_dbg(hdev, "Local host already has link key");
5353			confirm_hint = 1;
5354			goto confirm;
5355		}
5356
5357		BT_DBG("Auto-accept of user confirmation with %ums delay",
5358		       hdev->auto_accept_delay);
5359
5360		if (hdev->auto_accept_delay > 0) {
5361			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5362			queue_delayed_work(conn->hdev->workqueue,
5363					   &conn->auto_accept_work, delay);
5364			goto unlock;
5365		}
5366
5367		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5368			     sizeof(ev->bdaddr), &ev->bdaddr);
5369		goto unlock;
5370	}
5371
5372confirm:
5373	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5374				  le32_to_cpu(ev->passkey), confirm_hint);
5375
5376unlock:
5377	hci_dev_unlock(hdev);
5378}
5379
5380static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5381					 struct sk_buff *skb)
5382{
5383	struct hci_ev_user_passkey_req *ev = data;
5384
5385	bt_dev_dbg(hdev, "");
5386
5387	if (hci_dev_test_flag(hdev, HCI_MGMT))
5388		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5389}
5390
5391static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5392					struct sk_buff *skb)
5393{
5394	struct hci_ev_user_passkey_notify *ev = data;
5395	struct hci_conn *conn;
5396
5397	bt_dev_dbg(hdev, "");
5398
5399	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5400	if (!conn)
5401		return;
5402
5403	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5404	conn->passkey_entered = 0;
5405
5406	if (hci_dev_test_flag(hdev, HCI_MGMT))
5407		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5408					 conn->dst_type, conn->passkey_notify,
5409					 conn->passkey_entered);
5410}
5411
5412static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5413				    struct sk_buff *skb)
5414{
5415	struct hci_ev_keypress_notify *ev = data;
5416	struct hci_conn *conn;
5417
5418	bt_dev_dbg(hdev, "");
5419
5420	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5421	if (!conn)
5422		return;
5423
5424	switch (ev->type) {
5425	case HCI_KEYPRESS_STARTED:
5426		conn->passkey_entered = 0;
5427		return;
5428
5429	case HCI_KEYPRESS_ENTERED:
5430		conn->passkey_entered++;
5431		break;
5432
5433	case HCI_KEYPRESS_ERASED:
5434		conn->passkey_entered--;
5435		break;
5436
5437	case HCI_KEYPRESS_CLEARED:
5438		conn->passkey_entered = 0;
5439		break;
5440
5441	case HCI_KEYPRESS_COMPLETED:
5442		return;
5443	}
5444
5445	if (hci_dev_test_flag(hdev, HCI_MGMT))
5446		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5447					 conn->dst_type, conn->passkey_notify,
5448					 conn->passkey_entered);
5449}
5450
5451static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5452					 struct sk_buff *skb)
5453{
5454	struct hci_ev_simple_pair_complete *ev = data;
5455	struct hci_conn *conn;
5456
5457	bt_dev_dbg(hdev, "");
5458
5459	hci_dev_lock(hdev);
5460
5461	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5462	if (!conn || !hci_conn_ssp_enabled(conn))
5463		goto unlock;
5464
5465	/* Reset the authentication requirement to unknown */
5466	conn->remote_auth = 0xff;
5467
5468	/* To avoid duplicate auth_failed events to user space we check
5469	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
5470	 * initiated the authentication. A traditional auth_complete
5471	 * event is always produced as initiator and is also mapped to
5472	 * the mgmt_auth_failed event */
5473	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5474		mgmt_auth_failed(conn, ev->status);
5475
5476	hci_conn_drop(conn);
5477
5478unlock:
5479	hci_dev_unlock(hdev);
5480}
5481
5482static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5483					 struct sk_buff *skb)
5484{
5485	struct hci_ev_remote_host_features *ev = data;
5486	struct inquiry_entry *ie;
5487	struct hci_conn *conn;
5488
5489	bt_dev_dbg(hdev, "");
5490
5491	hci_dev_lock(hdev);
5492
5493	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5494	if (conn)
5495		memcpy(conn->features[1], ev->features, 8);
5496
5497	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5498	if (ie)
5499		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5500
5501	hci_dev_unlock(hdev);
5502}
5503
5504static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5505					    struct sk_buff *skb)
5506{
5507	struct hci_ev_remote_oob_data_request *ev = edata;
5508	struct oob_data *data;
5509
5510	bt_dev_dbg(hdev, "");
5511
5512	hci_dev_lock(hdev);
5513
5514	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5515		goto unlock;
5516
5517	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5518	if (!data) {
5519		struct hci_cp_remote_oob_data_neg_reply cp;
5520
5521		bacpy(&cp.bdaddr, &ev->bdaddr);
5522		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5523			     sizeof(cp), &cp);
5524		goto unlock;
5525	}
5526
5527	if (bredr_sc_enabled(hdev)) {
5528		struct hci_cp_remote_oob_ext_data_reply cp;
5529
5530		bacpy(&cp.bdaddr, &ev->bdaddr);
5531		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5532			memset(cp.hash192, 0, sizeof(cp.hash192));
5533			memset(cp.rand192, 0, sizeof(cp.rand192));
5534		} else {
5535			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5536			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5537		}
5538		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5539		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5540
5541		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5542			     sizeof(cp), &cp);
5543	} else {
5544		struct hci_cp_remote_oob_data_reply cp;
5545
5546		bacpy(&cp.bdaddr, &ev->bdaddr);
5547		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5548		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5549
5550		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5551			     sizeof(cp), &cp);
5552	}
5553
5554unlock:
5555	hci_dev_unlock(hdev);
5556}
5557
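/* Record the initiator and responder addresses used on air for an LE
 * connection. For outgoing connections the peer supplied bdaddr is
 * the responder address; for incoming ones the roles are reversed.
 * If the controller generated a Local RPA it takes precedence over
 * hdev->rpa and the identity address.
 */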
5558static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5559				u8 bdaddr_type, bdaddr_t *local_rpa)
5560{
5561	if (conn->out) {
5562		conn->dst_type = bdaddr_type;
5563		conn->resp_addr_type = bdaddr_type;
5564		bacpy(&conn->resp_addr, bdaddr);
5565
5566		/* If the controller has set a Local RPA then it must be
5567		 * used instead of hdev->rpa.
5568		 */
5569		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5570			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5571			bacpy(&conn->init_addr, local_rpa);
5572		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5573			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5574			bacpy(&conn->init_addr, &conn->hdev->rpa);
5575		} else {
5576			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5577						  &conn->init_addr_type);
5578		}
5579	} else {
5580		conn->resp_addr_type = conn->hdev->adv_addr_type;
5581		/* If the controller has set a Local RPA then it must be
5582		 * used instead of hdev->rpa.
5583		 */
5584		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5585			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5586			bacpy(&conn->resp_addr, local_rpa);
5587		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5588			/* In case of ext adv, resp_addr will be updated in
5589			 * Adv Terminated event.
5590			 */
5591			if (!ext_adv_capable(conn->hdev))
5592				bacpy(&conn->resp_addr,
5593				      &conn->hdev->random_addr);
5594		} else {
5595			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5596		}
5597
5598		conn->init_addr_type = bdaddr_type;
5599		bacpy(&conn->init_addr, bdaddr);
5600
5601		/* For incoming connections, set the default minimum
5602		 * and maximum connection interval. They will be used
5603		 * to check if the parameters are in range and if not
5604		 * trigger the connection update procedure.
5605		 */
5606		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5607		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5608	}
5609}
5610
5611static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5612				 bdaddr_t *bdaddr, u8 bdaddr_type,
5613				 bdaddr_t *local_rpa, u8 role, u16 handle,
5614				 u16 interval, u16 latency,
5615				 u16 supervision_timeout)
5616{
5617	struct hci_conn_params *params;
5618	struct hci_conn *conn;
5619	struct smp_irk *irk;
5620	u8 addr_type;
5621
5622	hci_dev_lock(hdev);
5623
5624	/* All controllers implicitly stop advertising in the event of a
5625	 * connection, so ensure that the state bit is cleared.
5626	 */
5627	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5628
5629	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5630	if (!conn) {
5631		/* In case of an error status with no connection pending,
5632		 * just unlock as there is nothing to clean up.
5633		 */
5634		if (status)
5635			goto unlock;
5636
5637		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5638		if (IS_ERR(conn)) {
5639			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5640			goto unlock;
5641		}
5642
5643		conn->dst_type = bdaddr_type;
5644
5645		/* If we didn't have a hci_conn object previously
5646		 * but we're in central role this must be something
5647		 * initiated using an accept list. Since accept list based
5648		 * connections are not "first class citizens" we don't
5649		 * have full tracking of them. Therefore, we go ahead
5650		 * with a "best effort" approach of determining the
5651		 * initiator address based on the HCI_PRIVACY flag.
5652		 */
5653		if (conn->out) {
5654			conn->resp_addr_type = bdaddr_type;
5655			bacpy(&conn->resp_addr, bdaddr);
5656			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5657				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5658				bacpy(&conn->init_addr, &hdev->rpa);
5659			} else {
5660				hci_copy_identity_address(hdev,
5661							  &conn->init_addr,
5662							  &conn->init_addr_type);
5663			}
5664		}
5665	} else {
5666		cancel_delayed_work(&conn->le_conn_timeout);
5667	}
5668
5669	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5670	 * Processing it more than once per connection can corrupt kernel memory.
5671	 *
5672	 * As the connection handle is set here for the first time, it indicates
5673	 * whether the connection is already set up.
5674	 */
5675	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5676		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5677		goto unlock;
5678	}
5679
5680	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5681
5682	/* Lookup the identity address from the stored connection
5683	 * address and address type.
5684	 *
5685	 * When establishing connections to an identity address, the
5686	 * connection procedure will store the resolvable random
5687	 * address first. Now if it can be converted back into the
5688	 * identity address, start using the identity address from
5689	 * now on.
5690	 */
5691	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5692	if (irk) {
5693		bacpy(&conn->dst, &irk->bdaddr);
5694		conn->dst_type = irk->addr_type;
5695	}
5696
5697	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5698
5699	/* All connection failure handling is taken care of by the
5700	 * hci_conn_failed function which is triggered by the HCI
5701	 * request completion callbacks used for connecting.
5702	 */
5703	if (status || hci_conn_set_handle(conn, handle))
5704		goto unlock;
5705
5706	/* Drop the connection if it has been aborted */
5707	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5708		hci_conn_drop(conn);
5709		goto unlock;
5710	}
5711
5712	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5713		addr_type = BDADDR_LE_PUBLIC;
5714	else
5715		addr_type = BDADDR_LE_RANDOM;
5716
5717	/* Drop the connection if the device is blocked */
5718	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5719		hci_conn_drop(conn);
5720		goto unlock;
5721	}
5722
5723	mgmt_device_connected(hdev, conn, NULL, 0);
5724
5725	conn->sec_level = BT_SECURITY_LOW;
5726	conn->state = BT_CONFIG;
5727
5728	/* Store current advertising instance as connection advertising instance
5729	 * when software rotation is in use so it can be re-enabled when
5730	 * disconnected.
5731	 */
5732	if (!ext_adv_capable(hdev))
5733		conn->adv_instance = hdev->cur_adv_instance;
5734
5735	conn->le_conn_interval = interval;
5736	conn->le_conn_latency = latency;
5737	conn->le_supv_timeout = supervision_timeout;
5738
5739	hci_debugfs_create_conn(conn);
5740	hci_conn_add_sysfs(conn);
5741
5742	/* The remote features procedure is defined for central
5743	 * role only. So only in case of an initiated connection
5744	 * request the remote features.
5745	 *
5746	 * If the local controller supports peripheral-initiated features
5747	 * exchange, then requesting the remote features in peripheral
5748	 * role is possible. Otherwise just transition into the
5749	 * connected state without requesting the remote features.
5750	 */
5751	if (conn->out ||
5752	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5753		struct hci_cp_le_read_remote_features cp;
5754
5755		cp.handle = __cpu_to_le16(conn->handle);
5756
5757		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5758			     sizeof(cp), &cp);
5759
5760		hci_conn_hold(conn);
5761	} else {
5762		conn->state = BT_CONNECTED;
5763		hci_connect_cfm(conn, status);
5764	}
5765
5766	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5767					   conn->dst_type);
5768	if (params) {
5769		hci_pend_le_list_del_init(params);
5770		if (params->conn) {
5771			hci_conn_drop(params->conn);
5772			hci_conn_put(params->conn);
5773			params->conn = NULL;
5774		}
5775	}
5776
5777unlock:
5778	hci_update_passive_scan(hdev);
5779	hci_dev_unlock(hdev);
5780}
5781
5782static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5783				     struct sk_buff *skb)
5784{
5785	struct hci_ev_le_conn_complete *ev = data;
5786
5787	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5788
5789	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5790			     NULL, ev->role, le16_to_cpu(ev->handle),
5791			     le16_to_cpu(ev->interval),
5792			     le16_to_cpu(ev->latency),
5793			     le16_to_cpu(ev->supervision_timeout));
5794}
5795
5796static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5797					 struct sk_buff *skb)
5798{
5799	struct hci_ev_le_enh_conn_complete *ev = data;
5800
5801	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5802
5803	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5804			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5805			     le16_to_cpu(ev->interval),
5806			     le16_to_cpu(ev->latency),
5807			     le16_to_cpu(ev->supervision_timeout));
5808}
5809
5810static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5811				    struct sk_buff *skb)
5812{
5813	struct hci_evt_le_ext_adv_set_term *ev = data;
5814	struct hci_conn *conn;
5815	struct adv_info *adv, *n;
5816
5817	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5818
5819	/* The Bluetooth Core 5.3 specification clearly states that this event
5820	 * shall not be sent when the Host disables the advertising set. So in
5821	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5822	 *
5823	 * When the Host disables an advertising set, all cleanup is done via
5824	 * its command callback and not needed to be duplicated here.
5825	 */
5826	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5827		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5828		return;
5829	}
5830
5831	hci_dev_lock(hdev);
5832
5833	adv = hci_find_adv_instance(hdev, ev->handle);
5834
5835	if (ev->status) {
5836		if (!adv)
5837			goto unlock;
5838
5839		/* Remove advertising as it has been terminated */
5840		hci_remove_adv_instance(hdev, ev->handle);
5841		mgmt_advertising_removed(NULL, hdev, ev->handle);
5842
5843		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5844			if (adv->enabled)
5845				goto unlock;
5846		}
5847
5848		/* We are no longer advertising, clear HCI_LE_ADV */
5849		hci_dev_clear_flag(hdev, HCI_LE_ADV);
5850		goto unlock;
5851	}
5852
5853	if (adv)
5854		adv->enabled = false;
5855
5856	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5857	if (conn) {
5858		/* Store handle in the connection so the correct advertising
5859		 * instance can be re-enabled when disconnected.
5860		 */
5861		conn->adv_instance = ev->handle;
5862
5863		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5864		    bacmp(&conn->resp_addr, BDADDR_ANY))
5865			goto unlock;
5866
5867		if (!ev->handle) {
5868			bacpy(&conn->resp_addr, &hdev->random_addr);
5869			goto unlock;
5870		}
5871
5872		if (adv)
5873			bacpy(&conn->resp_addr, &adv->random_addr);
5874	}
5875
5876unlock:
5877	hci_dev_unlock(hdev);
5878}
5879
5880static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5881					    struct sk_buff *skb)
5882{
5883	struct hci_ev_le_conn_update_complete *ev = data;
5884	struct hci_conn *conn;
5885
5886	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5887
5888	if (ev->status)
5889		return;
5890
5891	hci_dev_lock(hdev);
5892
5893	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5894	if (conn) {
5895		conn->le_conn_interval = le16_to_cpu(ev->interval);
5896		conn->le_conn_latency = le16_to_cpu(ev->latency);
5897		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5898	}
5899
5900	hci_dev_unlock(hdev);
5901}
5902
5903/* This function requires the caller holds hdev->lock */
5904static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5905					      bdaddr_t *addr,
5906					      u8 addr_type, bool addr_resolved,
5907					      u8 adv_type, u8 phy, u8 sec_phy)
5908{
5909	struct hci_conn *conn;
5910	struct hci_conn_params *params;
5911
5912	/* If the event is not connectable don't proceed further */
5913	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5914		return NULL;
5915
5916	/* Ignore if the device is blocked or hdev is suspended */
5917	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5918	    hdev->suspended)
5919		return NULL;
5920
5921	/* Most controllers will fail if we try to create new connections
5922	 * while we have an existing one in the peripheral role.
5923	 */
5924	if (hdev->conn_hash.le_num_peripheral > 0 &&
5925	    (test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) ||
5926	     !(hdev->le_states[3] & 0x10)))
5927		return NULL;
5928
5929	/* If we're not connectable, only connect to devices that we have
5930	 * in our pend_le_conns list.
5931	 */
5932	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5933					   addr_type);
5934	if (!params)
5935		return NULL;
5936
5937	if (!params->explicit_connect) {
5938		switch (params->auto_connect) {
5939		case HCI_AUTO_CONN_DIRECT:
5940			/* Only devices advertising with ADV_DIRECT_IND
5941			 * trigger a connection attempt. This allows
5942			 * incoming connections from peripheral devices.
5943			 */
5944			if (adv_type != LE_ADV_DIRECT_IND)
5945				return NULL;
5946			break;
5947		case HCI_AUTO_CONN_ALWAYS:
5948			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
5949			 * trigger a connection attempt. This means that
5950			 * incoming connections from peripheral devices are
5951			 * accepted and outgoing connections to peripheral
5952			 * devices are established when found.
5953			 */
5954			break;
5955		default:
5956			return NULL;
5957		}
5958	}
5959
5960	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5961			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5962			      HCI_ROLE_MASTER, phy, sec_phy);
5963	if (!IS_ERR(conn)) {
5964		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5965		 * by the higher layer that tried to connect; if not,
5966		 * store the pointer since we don't really have any
5967		 * other owner of the object besides the params that
5968		 * triggered it. This way we can abort the connection if
5969		 * the parameters get removed and keep the reference
5970		 * count consistent once the connection is established.
5971		 */
5972
5973		if (!params->explicit_connect)
5974			params->conn = hci_conn_get(conn);
5975
5976		return conn;
5977	}
5978
5979	switch (PTR_ERR(conn)) {
5980	case -EBUSY:
5981		/* If hci_connect() returns -EBUSY it means there is already
5982		 * an LE connection attempt going on. Since controllers don't
5983		 * support more than one connection attempt at the time, we
5984		 * don't consider this an error case.
5985		 */
5986		break;
5987	default:
5988		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5989		return NULL;
5990	}
5991
5992	return NULL;
5993}
5994
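/* Process a single advertising report, whether it arrived in a legacy
 * or an extended advertising event. direct_addr is only set for LE
 * Direct Advertising Reports, instant carries the timestamp used for
 * Mesh, and len is validated against max_adv_len() before the data is
 * used.
 */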
5995static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5996			       u8 bdaddr_type, bdaddr_t *direct_addr,
5997			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
5998			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
5999			       u64 instant)
6000{
6001	struct discovery_state *d = &hdev->discovery;
6002	struct smp_irk *irk;
6003	struct hci_conn *conn;
6004	bool match, bdaddr_resolved;
6005	u32 flags;
6006	u8 *ptr;
6007
6008	switch (type) {
6009	case LE_ADV_IND:
6010	case LE_ADV_DIRECT_IND:
6011	case LE_ADV_SCAN_IND:
6012	case LE_ADV_NONCONN_IND:
6013	case LE_ADV_SCAN_RSP:
6014		break;
6015	default:
6016		bt_dev_err_ratelimited(hdev, "unknown advertising packet type: 0x%02x",
6017				       type);
6018		return;
6019	}
6020
6021	if (len > max_adv_len(hdev)) {
6022		bt_dev_err_ratelimited(hdev,
6023				       "adv larger than maximum supported");
6024		return;
6025	}
6026
6027	/* Find the end of the data in case the report contains padded zero
6028	 * bytes at the end causing an invalid length value.
6029	 *
6030	 * When data is NULL, len is 0 so there is no need for extra ptr
6031	 * check as 'ptr < data + 0' is already false in such case.
6032	 */
6033	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6034		if (ptr + 1 + *ptr > data + len)
6035			break;
6036	}
6037
6038	/* Adjust for actual length. This handles the case when remote
6039	 * device is advertising with incorrect data length.
6040	 */
6041	len = ptr - data;
6042
6043	/* If the direct address is present, then this report is from
6044	 * a LE Direct Advertising Report event. In that case it is
6045	 * important to see if the address is matching the local
6046	 * controller address.
6047	 */
6048	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6049		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6050						  &bdaddr_resolved);
6051
6052		/* Only resolvable random addresses are valid for this
6053		 * kind of report; others can be ignored.
6054		 */
6055		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6056			return;
6057
6058		/* If the controller is not using resolvable random
6059		 * addresses, then this report can be ignored.
6060		 */
6061		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6062			return;
6063
6064		/* If the local IRK of the controller does not match
6065		 * with the resolvable random address provided, then
6066		 * this report can be ignored.
6067		 */
6068		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6069			return;
6070	}
6071
6072	/* Check if we need to convert to identity address */
6073	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6074	if (irk) {
6075		bdaddr = &irk->bdaddr;
6076		bdaddr_type = irk->addr_type;
6077	}
6078
6079	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6080
6081	/* Check if we have been requested to connect to this device.
6082	 *
6083	 * direct_addr is set only for directed advertising reports (it is NULL
6084	 * for advertising reports) and is already verified to be RPA above.
6085	 */
6086	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6087				     type, phy, sec_phy);
6088	if (!ext_adv && conn && type == LE_ADV_IND &&
6089	    len <= max_adv_len(hdev)) {
6090		/* Store report for later inclusion by
6091		 * mgmt_device_connected
6092		 */
6093		memcpy(conn->le_adv_data, data, len);
6094		conn->le_adv_data_len = len;
6095	}
6096
6097	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6098		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6099	else
6100		flags = 0;
6101
6102	/* All scan results should be sent up for Mesh systems */
6103	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6104		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6105				  rssi, flags, data, len, NULL, 0, instant);
6106		return;
6107	}
6108
6109	/* Passive scanning shouldn't trigger any device found events,
6110	 * except for devices marked as CONN_REPORT for which we do send
6111	 * device found events, or advertisement monitoring requested.
6112	 */
6113	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6114		if (type == LE_ADV_DIRECT_IND)
6115			return;
6116
6117		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6118					       bdaddr, bdaddr_type) &&
6119		    idr_is_empty(&hdev->adv_monitors_idr))
6120			return;
6121
6122		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6123				  rssi, flags, data, len, NULL, 0, 0);
6124		return;
6125	}
6126
6127	/* When receiving a scan response, there is no way to
6128	 * know if the remote device is connectable or not. However,
6129	 * since scan responses are merged with a previously seen
6130	 * advertising report, the flags field from that report
6131	 * will be used.
6132	 *
6133	 * In the unlikely case that a controller just sends a scan
6134	 * response event that doesn't match the pending report, then
6135	 * it is marked as a standalone SCAN_RSP.
6136	 */
6137	if (type == LE_ADV_SCAN_RSP)
6138		flags = MGMT_DEV_FOUND_SCAN_RSP;
6139
6140	/* If there's nothing pending either store the data from this
6141	 * event or send an immediate device found event if the data
6142	 * should not be stored for later.
6143	 */
6144	if (!ext_adv && !has_pending_adv_report(hdev)) {
6145		/* If the report will trigger a SCAN_REQ store it for
6146		 * later merging.
6147		 */
6148		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6149			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6150						 rssi, flags, data, len);
6151			return;
6152		}
6153
6154		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6155				  rssi, flags, data, len, NULL, 0, 0);
6156		return;
6157	}
6158
6159	/* Check if the pending report is for the same device as the new one */
6160	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6161		 bdaddr_type == d->last_adv_addr_type);
6162
6163	/* If the pending data doesn't match this report or this isn't a
6164	 * scan response (e.g. we got a duplicate ADV_IND) then force
6165	 * sending of the pending data.
6166	 */
6167	if (type != LE_ADV_SCAN_RSP || !match) {
6168		/* Send out whatever is in the cache, but skip duplicates */
6169		if (!match)
6170			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6171					  d->last_adv_addr_type, NULL,
6172					  d->last_adv_rssi, d->last_adv_flags,
6173					  d->last_adv_data,
6174					  d->last_adv_data_len, NULL, 0, 0);
6175
6176		/* If the new report will trigger a SCAN_REQ store it for
6177		 * later merging.
6178		 */
6179		if (!ext_adv && (type == LE_ADV_IND ||
6180				 type == LE_ADV_SCAN_IND)) {
6181			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6182						 rssi, flags, data, len);
6183			return;
6184		}
6185
6186		/* The advertising reports cannot be merged, so clear
6187		 * the pending report and send out a device found event.
6188		 */
6189		clear_pending_adv_report(hdev);
6190		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6191				  rssi, flags, data, len, NULL, 0, 0);
6192		return;
6193	}
6194
6195	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6196	 * the new event is a SCAN_RSP. We can therefore proceed with
6197	 * sending a merged device found event.
6198	 */
6199	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6200			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6201			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6202	clear_pending_adv_report(hdev);
6203}
6204
6205static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6206				  struct sk_buff *skb)
6207{
6208	struct hci_ev_le_advertising_report *ev = data;
6209	u64 instant = jiffies;
6210
6211	if (!ev->num)
6212		return;
6213
6214	hci_dev_lock(hdev);
6215
6216	while (ev->num--) {
6217		struct hci_ev_le_advertising_info *info;
6218		s8 rssi;
6219
6220		info = hci_le_ev_skb_pull(hdev, skb,
6221					  HCI_EV_LE_ADVERTISING_REPORT,
6222					  sizeof(*info));
6223		if (!info)
6224			break;
6225
6226		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6227					info->length + 1))
6228			break;
6229
6230		if (info->length <= max_adv_len(hdev)) {
6231			rssi = info->data[info->length];
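			/* In a legacy advertising report the RSSI octet
			 * immediately follows the advertising data, hence
			 * the info->length + 1 pull above.
			 */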
6232			process_adv_report(hdev, info->type, &info->bdaddr,
6233					   info->bdaddr_type, NULL, 0,
6234					   HCI_ADV_PHY_1M, 0, rssi,
6235					   info->data, info->length, false,
6236					   false, instant);
6237		} else {
6238			bt_dev_err(hdev, "Dropping invalid advertising data");
6239		}
6240	}
6241
6242	hci_dev_unlock(hdev);
6243}
6244
6245static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6246{
6247	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6248		switch (evt_type) {
6249		case LE_LEGACY_ADV_IND:
6250			return LE_ADV_IND;
6251		case LE_LEGACY_ADV_DIRECT_IND:
6252			return LE_ADV_DIRECT_IND;
6253		case LE_LEGACY_ADV_SCAN_IND:
6254			return LE_ADV_SCAN_IND;
6255		case LE_LEGACY_NONCONN_IND:
6256			return LE_ADV_NONCONN_IND;
6257		case LE_LEGACY_SCAN_RSP_ADV:
6258		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6259			return LE_ADV_SCAN_RSP;
6260		}
6261
6262		goto invalid;
6263	}
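	/* For example, a legacy ADV_IND report carries evt_type 0x0013
	 * (connectable | scannable | legacy PDU bits per the Core spec's
	 * extended advertising report event types) and is mapped to
	 * LE_ADV_IND above.
	 */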
6264
6265	if (evt_type & LE_EXT_ADV_CONN_IND) {
6266		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6267			return LE_ADV_DIRECT_IND;
6268
6269		return LE_ADV_IND;
6270	}
6271
6272	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6273		return LE_ADV_SCAN_RSP;
6274
6275	if (evt_type & LE_EXT_ADV_SCAN_IND)
6276		return LE_ADV_SCAN_IND;
6277
6278	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6279	    evt_type & LE_EXT_ADV_DIRECT_IND)
6280		return LE_ADV_NONCONN_IND;
6281
6282invalid:
6283	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6284			       evt_type);
6285
6286	return LE_ADV_INVALID;
6287}
6288
6289static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6290				      struct sk_buff *skb)
6291{
6292	struct hci_ev_le_ext_adv_report *ev = data;
6293	u64 instant = jiffies;
6294
6295	if (!ev->num)
6296		return;
6297
6298	hci_dev_lock(hdev);
6299
6300	while (ev->num--) {
6301		struct hci_ev_le_ext_adv_info *info;
6302		u8 legacy_evt_type;
6303		u16 evt_type;
6304
6305		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6306					  sizeof(*info));
6307		if (!info)
6308			break;
6309
6310		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6311					info->length))
6312			break;
6313
6314		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6315		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6316
6317		if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
6318			     &hdev->quirks)) {
6319			info->primary_phy &= 0x1f;
6320			info->secondary_phy &= 0x1f;
6321		}
6322
6323		if (legacy_evt_type != LE_ADV_INVALID) {
6324			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6325					   info->bdaddr_type, NULL, 0,
6326					   info->primary_phy,
6327					   info->secondary_phy,
6328					   info->rssi, info->data, info->length,
6329					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6330					   false, instant);
6331		}
6332	}
6333
6334	hci_dev_unlock(hdev);
6335}
6336
6337static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6338{
6339	struct hci_cp_le_pa_term_sync cp;
6340
6341	memset(&cp, 0, sizeof(cp));
6342	cp.handle = handle;
6343
6344	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6345}
6346
6347static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6348					    struct sk_buff *skb)
6349{
6350	struct hci_ev_le_pa_sync_established *ev = data;
6351	int mask = hdev->link_mode;
6352	__u8 flags = 0;
6353	struct hci_conn *pa_sync, *conn;
6354
6355	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6356
6357	hci_dev_lock(hdev);
6358
6359	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6360
6361	conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
6362					ev->bdaddr_type);
6363	if (!conn) {
6364		bt_dev_err(hdev,
6365			   "Unable to find connection for dst %pMR sid 0x%2.2x",
6366			   &ev->bdaddr, ev->sid);
6367		goto unlock;
6368	}
6369
6370	clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
6371
6372	conn->sync_handle = le16_to_cpu(ev->handle);
6373	conn->sid = HCI_SID_INVALID;
6374
6375	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6376	if (!(mask & HCI_LM_ACCEPT)) {
6377		hci_le_pa_term_sync(hdev, ev->handle);
6378		goto unlock;
6379	}
6380
6381	if (!(flags & HCI_PROTO_DEFER))
6382		goto unlock;
6383
6384	/* Add connection to indicate PA sync event */
6385	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6386				     HCI_ROLE_SLAVE);
6387
6388	if (IS_ERR(pa_sync))
6389		goto unlock;
6390
6391	pa_sync->sync_handle = le16_to_cpu(ev->handle);
6392
6393	if (ev->status) {
6394		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6395
6396		/* Notify iso layer */
6397		hci_connect_cfm(pa_sync, ev->status);
6398	}
6399
6400unlock:
6401	/* Handle any other pending PA sync command */
6402	hci_pa_create_sync_pending(hdev);
6403
6404	hci_dev_unlock(hdev);
6405}
6406
6407static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6408				      struct sk_buff *skb)
6409{
6410	struct hci_ev_le_per_adv_report *ev = data;
6411	int mask = hdev->link_mode;
6412	__u8 flags = 0;
6413	struct hci_conn *pa_sync;
6414
6415	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6416
6417	hci_dev_lock(hdev);
6418
6419	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6420	if (!(mask & HCI_LM_ACCEPT))
6421		goto unlock;
6422
6423	if (!(flags & HCI_PROTO_DEFER))
6424		goto unlock;
6425
6426	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
6427						      le16_to_cpu(ev->sync_handle));
6429
6430	if (!pa_sync)
6431		goto unlock;
6432
6433	if (ev->data_status == LE_PA_DATA_COMPLETE &&
6434	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
6435		/* Notify iso layer */
6436		hci_connect_cfm(pa_sync, 0);
6437
6438		/* Notify MGMT layer */
6439		mgmt_device_connected(hdev, pa_sync, NULL, 0);
6440	}
6441
6442unlock:
6443	hci_dev_unlock(hdev);
6444}
6445
6446static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6447					    struct sk_buff *skb)
6448{
6449	struct hci_ev_le_remote_feat_complete *ev = data;
6450	struct hci_conn *conn;
6451
6452	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6453
6454	hci_dev_lock(hdev);
6455
6456	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6457	if (conn) {
6458		if (!ev->status)
6459			memcpy(conn->features[0], ev->features, 8);
6460
6461		if (conn->state == BT_CONFIG) {
6462			__u8 status;
6463
6464			/* If the local controller supports peripheral-initiated
6465			 * features exchange, but the remote controller does
6466			 * not, then it is possible that the error code 0x1a
6467			 * for unsupported remote feature gets returned.
6468			 *
6469			 * In this specific case, allow the connection to
6470			 * transition into connected state and mark it as
6471			 * successful.
6472			 */
6473			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6474			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6475				status = 0x00;
6476			else
6477				status = ev->status;
6478
6479			conn->state = BT_CONNECTED;
6480			hci_connect_cfm(conn, status);
6481			hci_conn_drop(conn);
6482		}
6483	}
6484
6485	hci_dev_unlock(hdev);
6486}
6487
6488static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6489				   struct sk_buff *skb)
6490{
6491	struct hci_ev_le_ltk_req *ev = data;
6492	struct hci_cp_le_ltk_reply cp;
6493	struct hci_cp_le_ltk_neg_reply neg;
6494	struct hci_conn *conn;
6495	struct smp_ltk *ltk;
6496
6497	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6498
6499	hci_dev_lock(hdev);
6500
6501	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6502	if (!conn)
6503		goto not_found;
6504
6505	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6506	if (!ltk)
6507		goto not_found;
6508
6509	if (smp_ltk_is_sc(ltk)) {
6510		/* With SC both EDiv and Rand are set to zero */
6511		if (ev->ediv || ev->rand)
6512			goto not_found;
6513	} else {
6514		/* For non-SC keys check that EDiv and Rand match */
6515		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6516			goto not_found;
6517	}
6518
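	/* cp.ltk is a fixed 16-byte field; a key with a reduced enc_size is
	 * copied in and zero-padded up to that length below.
	 */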
6519	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6520	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6521	cp.handle = cpu_to_le16(conn->handle);
6522
6523	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6524
6525	conn->enc_key_size = ltk->enc_size;
6526
6527	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6528
6529	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6530	 * temporary key used to encrypt a connection following
6531	 * pairing. It is used during the Encrypted Session Setup to
6532	 * distribute the keys. Later, security can be re-established
6533	 * using a distributed LTK.
6534	 */
6535	if (ltk->type == SMP_STK) {
6536		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6537		list_del_rcu(&ltk->list);
6538		kfree_rcu(ltk, rcu);
6539	} else {
6540		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6541	}
6542
6543	hci_dev_unlock(hdev);
6544
6545	return;
6546
6547not_found:
6548	neg.handle = ev->handle;
6549	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6550	hci_dev_unlock(hdev);
6551}
6552
6553static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6554				      u8 reason)
6555{
6556	struct hci_cp_le_conn_param_req_neg_reply cp;
6557
6558	cp.handle = cpu_to_le16(handle);
6559	cp.reason = reason;
6560
6561	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6562		     &cp);
6563}
6564
6565static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6566					     struct sk_buff *skb)
6567{
6568	struct hci_ev_le_remote_conn_param_req *ev = data;
6569	struct hci_cp_le_conn_param_req_reply cp;
6570	struct hci_conn *hcon;
6571	u16 handle, min, max, latency, timeout;
6572
6573	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6574
6575	handle = le16_to_cpu(ev->handle);
6576	min = le16_to_cpu(ev->interval_min);
6577	max = le16_to_cpu(ev->interval_max);
6578	latency = le16_to_cpu(ev->latency);
6579	timeout = le16_to_cpu(ev->timeout);
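	/* Per the Core spec, min/max are connection intervals in units of
	 * 1.25 ms, timeout is in units of 10 ms and latency is a number of
	 * connection events.
	 */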
6580
6581	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6582	if (!hcon || hcon->state != BT_CONNECTED)
6583		return send_conn_param_neg_reply(hdev, handle,
6584						 HCI_ERROR_UNKNOWN_CONN_ID);
6585
6586	if (max > hcon->le_conn_max_interval)
6587		return send_conn_param_neg_reply(hdev, handle,
6588						 HCI_ERROR_INVALID_LL_PARAMS);
6589
6590	if (hci_check_conn_params(min, max, latency, timeout))
6591		return send_conn_param_neg_reply(hdev, handle,
6592						 HCI_ERROR_INVALID_LL_PARAMS);
6593
6594	if (hcon->role == HCI_ROLE_MASTER) {
6595		struct hci_conn_params *params;
6596		u8 store_hint;
6597
6598		hci_dev_lock(hdev);
6599
6600		params = hci_conn_params_lookup(hdev, &hcon->dst,
6601						hcon->dst_type);
6602		if (params) {
6603			params->conn_min_interval = min;
6604			params->conn_max_interval = max;
6605			params->conn_latency = latency;
6606			params->supervision_timeout = timeout;
6607			store_hint = 0x01;
6608		} else {
6609			store_hint = 0x00;
6610		}
6611
6612		hci_dev_unlock(hdev);
6613
6614		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6615				    store_hint, min, max, latency, timeout);
6616	}
6617
6618	cp.handle = ev->handle;
6619	cp.interval_min = ev->interval_min;
6620	cp.interval_max = ev->interval_max;
6621	cp.latency = ev->latency;
6622	cp.timeout = ev->timeout;
6623	cp.min_ce_len = 0;
6624	cp.max_ce_len = 0;
6625
6626	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6627}
6628
6629static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6630					 struct sk_buff *skb)
6631{
6632	struct hci_ev_le_direct_adv_report *ev = data;
6633	u64 instant = jiffies;
6634	int i;
6635
6636	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6637				flex_array_size(ev, info, ev->num)))
6638		return;
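	/* flex_array_size() computes ev->num * sizeof(ev->info[0]) with
	 * overflow checking, so the pull above validates the whole report
	 * list at once.
	 */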
6639
6640	if (!ev->num)
6641		return;
6642
6643	hci_dev_lock(hdev);
6644
6645	for (i = 0; i < ev->num; i++) {
6646		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6647
6648		process_adv_report(hdev, info->type, &info->bdaddr,
6649				   info->bdaddr_type, &info->direct_addr,
6650				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6651				   info->rssi, NULL, 0, false, false, instant);
6652	}
6653
6654	hci_dev_unlock(hdev);
6655}
6656
6657static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6658				  struct sk_buff *skb)
6659{
6660	struct hci_ev_le_phy_update_complete *ev = data;
6661	struct hci_conn *conn;
6662
6663	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6664
6665	if (ev->status)
6666		return;
6667
6668	hci_dev_lock(hdev);
6669
6670	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6671	if (!conn)
6672		goto unlock;
6673
6674	conn->le_tx_phy = ev->tx_phy;
6675	conn->le_rx_phy = ev->rx_phy;
6676
6677unlock:
6678	hci_dev_unlock(hdev);
6679}
6680
6681static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6682					struct sk_buff *skb)
6683{
6684	struct hci_evt_le_cis_established *ev = data;
6685	struct hci_conn *conn;
6686	struct bt_iso_qos *qos;
6687	bool pending = false;
6688	u16 handle = __le16_to_cpu(ev->handle);
6689	u32 c_sdu_interval, p_sdu_interval;
6690
6691	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6692
6693	hci_dev_lock(hdev);
6694
6695	conn = hci_conn_hash_lookup_handle(hdev, handle);
6696	if (!conn) {
6697		bt_dev_err(hdev,
6698			   "Unable to find connection with handle 0x%4.4x",
6699			   handle);
6700		goto unlock;
6701	}
6702
6703	if (conn->type != ISO_LINK) {
6704		bt_dev_err(hdev,
6705			   "Invalid connection link type handle 0x%4.4x",
6706			   handle);
6707		goto unlock;
6708	}
6709
6710	qos = &conn->iso_qos;
6711
6712	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6713
6714	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
6715	 * page 3075:
6716	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) x
6717	 * ISO_Interval + SDU_Interval_C_To_P
6718	 * ...
6719	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
6720	 *					Transport_Latency
6721	 */
6722	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6723			 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
6724			get_unaligned_le24(ev->c_latency);
6725	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6726			 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
6727			get_unaligned_le24(ev->p_latency);
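	/* Worked example with illustrative numbers: a CIG_Sync_Delay of
	 * 5000 us, FT_C_To_P of 2 and an ISO_Interval of 8 slots
	 * (8 * 1250 us = 10 ms) with a Transport_Latency_C_To_P of
	 * 15000 us gives c_sdu_interval = 5000 + 20000 - 15000 = 10000 us.
	 */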
6728
6729	switch (conn->role) {
6730	case HCI_ROLE_SLAVE:
6731		qos->ucast.in.interval = c_sdu_interval;
6732		qos->ucast.out.interval = p_sdu_interval;
6733		/* Convert Transport Latency (us) to Latency (msec) */
6734		qos->ucast.in.latency =
6735			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6736					  1000);
6737		qos->ucast.out.latency =
6738			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6739					  1000);
6740		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6741		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6742		qos->ucast.in.phy = ev->c_phy;
6743		qos->ucast.out.phy = ev->p_phy;
6744		break;
6745	case HCI_ROLE_MASTER:
6746		qos->ucast.in.interval = p_sdu_interval;
6747		qos->ucast.out.interval = c_sdu_interval;
6748		/* Convert Transport Latency (us) to Latency (msec) */
6749		qos->ucast.out.latency =
6750			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6751					  1000);
6752		qos->ucast.in.latency =
6753			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6754					  1000);
6755		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6756		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6757		qos->ucast.out.phy = ev->c_phy;
6758		qos->ucast.in.phy = ev->p_phy;
6759		break;
6760	}
6761
6762	if (!ev->status) {
6763		conn->state = BT_CONNECTED;
6764		hci_debugfs_create_conn(conn);
6765		hci_conn_add_sysfs(conn);
6766		hci_iso_setup_path(conn);
6767		goto unlock;
6768	}
6769
6770	conn->state = BT_CLOSED;
6771	hci_connect_cfm(conn, ev->status);
6772	hci_conn_del(conn);
6773
6774unlock:
6775	if (pending)
6776		hci_le_create_cis_pending(hdev);
6777
6778	hci_dev_unlock(hdev);
6779}
6780
6781static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6782{
6783	struct hci_cp_le_reject_cis cp;
6784
6785	memset(&cp, 0, sizeof(cp));
6786	cp.handle = handle;
6787	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6788	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6789}
6790
6791static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6792{
6793	struct hci_cp_le_accept_cis cp;
6794
6795	memset(&cp, 0, sizeof(cp));
6796	cp.handle = handle;
6797	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6798}
6799
6800static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6801			       struct sk_buff *skb)
6802{
6803	struct hci_evt_le_cis_req *ev = data;
6804	u16 acl_handle, cis_handle;
6805	struct hci_conn *acl, *cis;
6806	int mask;
6807	__u8 flags = 0;
6808
6809	acl_handle = __le16_to_cpu(ev->acl_handle);
6810	cis_handle = __le16_to_cpu(ev->cis_handle);
6811
6812	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6813		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6814
6815	hci_dev_lock(hdev);
6816
6817	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6818	if (!acl)
6819		goto unlock;
6820
6821	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6822	if (!(mask & HCI_LM_ACCEPT)) {
6823		hci_le_reject_cis(hdev, ev->cis_handle);
6824		goto unlock;
6825	}
6826
6827	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6828	if (!cis) {
6829		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
6830				   cis_handle);
6831		if (IS_ERR(cis)) {
6832			hci_le_reject_cis(hdev, ev->cis_handle);
6833			goto unlock;
6834		}
6835	}
6836
6837	cis->iso_qos.ucast.cig = ev->cig_id;
6838	cis->iso_qos.ucast.cis = ev->cis_id;
6839
6840	if (!(flags & HCI_PROTO_DEFER)) {
6841		hci_le_accept_cis(hdev, ev->cis_handle);
6842	} else {
6843		cis->state = BT_CONNECT2;
6844		hci_connect_cfm(cis, 0);
6845	}
6846
6847unlock:
6848	hci_dev_unlock(hdev);
6849}
6850
6851static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6852{
6853	u8 handle = PTR_UINT(data);
6854
6855	return hci_le_terminate_big_sync(hdev, handle,
6856					 HCI_ERROR_LOCAL_HOST_TERM);
6857}
6858
6859static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6860					   struct sk_buff *skb)
6861{
6862	struct hci_evt_le_create_big_complete *ev = data;
6863	struct hci_conn *conn;
6864	__u8 i = 0;
6865
6866	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6867
6868	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6869				flex_array_size(ev, bis_handle, ev->num_bis)))
6870		return;
6871
6872	hci_dev_lock(hdev);
6873
6874	/* Connect all BISes that are bound to the BIG */
6875	while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
6876						      BT_BOUND))) {
6877		if (ev->status) {
6878			hci_connect_cfm(conn, ev->status);
6879			hci_conn_del(conn);
6880			continue;
6881		}
6882
6883		if (hci_conn_set_handle(conn,
6884					__le16_to_cpu(ev->bis_handle[i++])))
6885			continue;
6886
6887		conn->state = BT_CONNECTED;
6888		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6889		hci_debugfs_create_conn(conn);
6890		hci_conn_add_sysfs(conn);
6891		hci_iso_setup_path(conn);
6892	}
6893
6894	if (!ev->status && !i)
6895		/* If no BISes have been connected for the BIG,
6896		 * terminate. This is in case all bound connections
6897		 * have been closed before the BIG creation
6898		 * has completed.
6899		 */
6900		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6901				   UINT_PTR(ev->handle), NULL);
6902
6903	hci_dev_unlock(hdev);
6904}
6905
6906static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6907					    struct sk_buff *skb)
6908{
6909	struct hci_evt_le_big_sync_estabilished *ev = data;
6910	struct hci_conn *bis, *conn;
6911	int i;
6912
6913	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6914
6915	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6916				flex_array_size(ev, bis, ev->num_bis)))
6917		return;
6918
6919	hci_dev_lock(hdev);
6920
6921	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
6922						  ev->num_bis);
6923	if (!conn) {
6924		bt_dev_err(hdev,
6925			   "Unable to find connection for big 0x%2.2x",
6926			   ev->handle);
6927		goto unlock;
6928	}
6929
6930	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
6931
6932	conn->num_bis = 0;
6933	memset(conn->bis, 0, sizeof(conn->bis));
6934
6935	for (i = 0; i < ev->num_bis; i++) {
6936		u16 handle = le16_to_cpu(ev->bis[i]);
6937		__le32 interval;
6938
6939		bis = hci_conn_hash_lookup_handle(hdev, handle);
6940		if (!bis) {
6941			if (handle > HCI_CONN_HANDLE_MAX) {
6942				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
6943				continue;
6944			}
6945			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6946					   HCI_ROLE_SLAVE, handle);
6947			if (IS_ERR(bis))
6948				continue;
6949		}
6950
6951		if (ev->status != 0x42)
6952			/* Mark PA sync as established */
6953			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6954
6955		bis->sync_handle = conn->sync_handle;
6956		bis->iso_qos.bcast.big = ev->handle;
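		/* ev->latency is the 24-bit Transport_Latency_BIG (in
		 * microseconds per the Core spec); it is widened to 32 bits
		 * and reused as the incoming SDU interval below.
		 */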
6957		memset(&interval, 0, sizeof(interval));
6958		memcpy(&interval, ev->latency, sizeof(ev->latency));
6959		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
6960		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6961		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
6962		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
6963
6964		if (!ev->status) {
6965			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
6966			hci_iso_setup_path(bis);
6967		}
6968	}
6969
6970	/* In case BIG sync failed, notify the user of each failed
6971	 * connection after all hci connections have been added
6972	 */
6973	if (ev->status)
6974		for (i = 0; i < ev->num_bis; i++) {
6975			u16 handle = le16_to_cpu(ev->bis[i]);
6976
6977			bis = hci_conn_hash_lookup_handle(hdev, handle);
6978			if (!bis)
6979				continue;
6980
6981			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
6982			hci_connect_cfm(bis, ev->status);
6983		}
6984
6985unlock:
6986	/* Handle any other pending BIG sync command */
6987	hci_le_big_create_sync_pending(hdev);
6988
6989	hci_dev_unlock(hdev);
6990}
6991
6992static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
6993					   struct sk_buff *skb)
6994{
6995	struct hci_evt_le_big_info_adv_report *ev = data;
6996	int mask = hdev->link_mode;
6997	__u8 flags = 0;
6998	struct hci_conn *pa_sync;
6999
7000	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7001
7002	hci_dev_lock(hdev);
7003
7004	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7005	if (!(mask & HCI_LM_ACCEPT))
7006		goto unlock;
7007
7008	if (!(flags & HCI_PROTO_DEFER))
7009		goto unlock;
7010
7011	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
7012						      le16_to_cpu(ev->sync_handle));
7014
7015	if (!pa_sync)
7016		goto unlock;
7017
7018	pa_sync->iso_qos.bcast.encryption = ev->encryption;
7019
7020	/* Notify iso layer */
7021	hci_connect_cfm(pa_sync, 0);
7022
7023unlock:
7024	hci_dev_unlock(hdev);
7025}
7026
7027#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7028[_op] = { \
7029	.func = _func, \
7030	.min_len = _min_len, \
7031	.max_len = _max_len, \
7032}
7033
7034#define HCI_LE_EV(_op, _func, _len) \
7035	HCI_LE_EV_VL(_op, _func, _len, _len)
7036
7037#define HCI_LE_EV_STATUS(_op, _func) \
7038	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
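/* For example, HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
 * sizeof(struct hci_ev_le_ltk_req)) expands to the designated
 * initializer [0x05] = { .func = hci_le_ltk_request_evt,
 * .min_len = sizeof(struct hci_ev_le_ltk_req),
 * .max_len = sizeof(struct hci_ev_le_ltk_req) } in the table below.
 */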
7039
7040/* Entries in this table shall be positioned according to the subevent
7041 * opcode they handle, so using the macros above is recommended since they
7042 * initialize each entry at its proper index via designated initializers;
7043 * that way events without a callback function can simply be omitted.
7044 */
7045static const struct hci_le_ev {
7046	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7047	u16  min_len;
7048	u16  max_len;
7049} hci_le_ev_table[U8_MAX + 1] = {
7050	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7051	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7052		  sizeof(struct hci_ev_le_conn_complete)),
7053	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7054	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7055		     sizeof(struct hci_ev_le_advertising_report),
7056		     HCI_MAX_EVENT_SIZE),
7057	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7058	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7059		  hci_le_conn_update_complete_evt,
7060		  sizeof(struct hci_ev_le_conn_update_complete)),
7061	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7062	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7063		  hci_le_remote_feat_complete_evt,
7064		  sizeof(struct hci_ev_le_remote_feat_complete)),
7065	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7066	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7067		  sizeof(struct hci_ev_le_ltk_req)),
7068	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7069	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7070		  hci_le_remote_conn_param_req_evt,
7071		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7072	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7073	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7074		  hci_le_enh_conn_complete_evt,
7075		  sizeof(struct hci_ev_le_enh_conn_complete)),
7076	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7077	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7078		     sizeof(struct hci_ev_le_direct_adv_report),
7079		     HCI_MAX_EVENT_SIZE),
7080	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7081	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7082		  sizeof(struct hci_ev_le_phy_update_complete)),
7083	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7084	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7085		     sizeof(struct hci_ev_le_ext_adv_report),
7086		     HCI_MAX_EVENT_SIZE),
7087	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7088	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7089		  hci_le_pa_sync_estabilished_evt,
7090		  sizeof(struct hci_ev_le_pa_sync_established)),
7091	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7092	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7093		     hci_le_per_adv_report_evt,
7094		     sizeof(struct hci_ev_le_per_adv_report),
7095		     HCI_MAX_EVENT_SIZE),
7096	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7097	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7098		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7099	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7100	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7101		  sizeof(struct hci_evt_le_cis_established)),
7102	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7103	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7104		  sizeof(struct hci_evt_le_cis_req)),
7105	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7106	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7107		     hci_le_create_big_complete_evt,
7108		     sizeof(struct hci_evt_le_create_big_complete),
7109		     HCI_MAX_EVENT_SIZE),
7110	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7111	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7112		     hci_le_big_sync_established_evt,
7113		     sizeof(struct hci_evt_le_big_sync_estabilished),
7114		     HCI_MAX_EVENT_SIZE),
7115	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7116	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7117		     hci_le_big_info_adv_report_evt,
7118		     sizeof(struct hci_evt_le_big_info_adv_report),
7119		     HCI_MAX_EVENT_SIZE),
7120};
7121
7122static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7123			    struct sk_buff *skb, u16 *opcode, u8 *status,
7124			    hci_req_complete_t *req_complete,
7125			    hci_req_complete_skb_t *req_complete_skb)
7126{
7127	struct hci_ev_le_meta *ev = data;
7128	const struct hci_le_ev *subev;
7129
7130	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7131
7132	/* Only match the event if the command's OGF is 0x08 (LE Controller) */
7133	if (hdev->req_skb &&
7134	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7135	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7136		*opcode = hci_skb_opcode(hdev->req_skb);
7137		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7138				     req_complete_skb);
7139	}
7140
7141	subev = &hci_le_ev_table[ev->subevent];
7142	if (!subev->func)
7143		return;
7144
7145	if (skb->len < subev->min_len) {
7146		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7147			   ev->subevent, skb->len, subev->min_len);
7148		return;
7149	}
7150
7151	/* Just warn if the length is over max_len, since it may still be
7152	 * possible to partially parse the event, so leave it to the callback
7153	 * to decide whether that is acceptable.
7154	 */
7155	if (skb->len > subev->max_len)
7156		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7157			    ev->subevent, skb->len, subev->max_len);
7158	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7159	if (!data)
7160		return;
7161
7162	subev->func(hdev, data, skb);
7163}
7164
7165static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7166				 u8 event, struct sk_buff *skb)
7167{
7168	struct hci_ev_cmd_complete *ev;
7169	struct hci_event_hdr *hdr;
7170
7171	if (!skb)
7172		return false;
7173
7174	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7175	if (!hdr)
7176		return false;
7177
7178	if (event) {
7179		if (hdr->evt != event)
7180			return false;
7181		return true;
7182	}
7183
7184	/* Check if request ended in Command Status - no way to retrieve
7185	 * any extra parameters in this case.
7186	 */
7187	if (hdr->evt == HCI_EV_CMD_STATUS)
7188		return false;
7189
7190	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7191		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7192			   hdr->evt);
7193		return false;
7194	}
7195
7196	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7197	if (!ev)
7198		return false;
7199
7200	if (opcode != __le16_to_cpu(ev->opcode)) {
7201		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7202		       __le16_to_cpu(ev->opcode));
7203		return false;
7204	}
7205
7206	return true;
7207}
7208
7209static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7210				  struct sk_buff *skb)
7211{
7212	struct hci_ev_le_advertising_info *adv;
7213	struct hci_ev_le_direct_adv_info *direct_adv;
7214	struct hci_ev_le_ext_adv_info *ext_adv;
7215	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7216	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7217
7218	hci_dev_lock(hdev);
7219
7220	/* If we are currently suspended and this is the first BT event seen,
7221	 * save the wake reason associated with the event.
7222	 */
7223	if (!hdev->suspended || hdev->wake_reason)
7224		goto unlock;
7225
7226	/* Default to remote wake. Values for wake_reason are documented in
7227	 * the BlueZ mgmt API docs.
7228	 */
7229	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7230
7231	/* Once configured for remote wakeup, we should only wake up for
7232	 * reconnections. It's useful to see which device is waking us up so
7233	 * keep track of the bdaddr of the connection event that woke us up.
7234	 */
7235	if (event == HCI_EV_CONN_REQUEST) {
7236		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7237		hdev->wake_addr_type = BDADDR_BREDR;
7238	} else if (event == HCI_EV_CONN_COMPLETE) {
7239		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7240		hdev->wake_addr_type = BDADDR_BREDR;
7241	} else if (event == HCI_EV_LE_META) {
7242		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7243		u8 subevent = le_ev->subevent;
7244		u8 *ptr = &skb->data[sizeof(*le_ev)];
7245		u8 num_reports = *ptr;
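		/* Report layout: the subevent header is followed by a
		 * one-octet report count and then the first report, which
		 * is all that is needed to recover the waking address.
		 */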
7246
7247		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7248		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7249		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7250		    num_reports) {
7251			adv = (void *)(ptr + 1);
7252			direct_adv = (void *)(ptr + 1);
7253			ext_adv = (void *)(ptr + 1);
7254
7255			switch (subevent) {
7256			case HCI_EV_LE_ADVERTISING_REPORT:
7257				bacpy(&hdev->wake_addr, &adv->bdaddr);
7258				hdev->wake_addr_type = adv->bdaddr_type;
7259				break;
7260			case HCI_EV_LE_DIRECT_ADV_REPORT:
7261				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7262				hdev->wake_addr_type = direct_adv->bdaddr_type;
7263				break;
7264			case HCI_EV_LE_EXT_ADV_REPORT:
7265				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7266				hdev->wake_addr_type = ext_adv->bdaddr_type;
7267				break;
7268			}
7269		}
7270	} else {
7271		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7272	}
7273
7274unlock:
7275	hci_dev_unlock(hdev);
7276}
7277
7278#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7279[_op] = { \
7280	.req = false, \
7281	.func = _func, \
7282	.min_len = _min_len, \
7283	.max_len = _max_len, \
7284}
7285
7286#define HCI_EV(_op, _func, _len) \
7287	HCI_EV_VL(_op, _func, _len, _len)
7288
7289#define HCI_EV_STATUS(_op, _func) \
7290	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7291
7292#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7293[_op] = { \
7294	.req = true, \
7295	.func_req = _func, \
7296	.min_len = _min_len, \
7297	.max_len = _max_len, \
7298}
7299
7300#define HCI_EV_REQ(_op, _func, _len) \
7301	HCI_EV_REQ_VL(_op, _func, _len, _len)
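/* Entries created with HCI_EV_REQ{,_VL} set .req = true so that
 * hci_event_func() below dispatches them through func_req, passing
 * the request completion parameters along.
 */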
7302
7303/* Entries in this table shall be positioned according to the event opcode
7304 * they handle, so using the macros above is recommended since they
7305 * initialize each entry at its proper index via designated initializers;
7306 * that way events without a callback function need not be entered.
7307 */
7308static const struct hci_ev {
7309	bool req;
7310	union {
7311		void (*func)(struct hci_dev *hdev, void *data,
7312			     struct sk_buff *skb);
7313		void (*func_req)(struct hci_dev *hdev, void *data,
7314				 struct sk_buff *skb, u16 *opcode, u8 *status,
7315				 hci_req_complete_t *req_complete,
7316				 hci_req_complete_skb_t *req_complete_skb);
7317	};
7318	u16  min_len;
7319	u16  max_len;
7320} hci_ev_table[U8_MAX + 1] = {
7321	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7322	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7323	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7324	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7325		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7326	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7327	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7328	       sizeof(struct hci_ev_conn_complete)),
7329	/* [0x04 = HCI_EV_CONN_REQUEST] */
7330	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7331	       sizeof(struct hci_ev_conn_request)),
7332	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7333	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7334	       sizeof(struct hci_ev_disconn_complete)),
7335	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7336	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7337	       sizeof(struct hci_ev_auth_complete)),
7338	/* [0x07 = HCI_EV_REMOTE_NAME] */
7339	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7340	       sizeof(struct hci_ev_remote_name)),
7341	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7342	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7343	       sizeof(struct hci_ev_encrypt_change)),
7344	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7345	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7346	       hci_change_link_key_complete_evt,
7347	       sizeof(struct hci_ev_change_link_key_complete)),
7348	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7349	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7350	       sizeof(struct hci_ev_remote_features)),
7351	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7352	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7353		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7354	/* [0x0f = HCI_EV_CMD_STATUS] */
7355	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7356		   sizeof(struct hci_ev_cmd_status)),
7357	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7358	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7359	       sizeof(struct hci_ev_hardware_error)),
7360	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7361	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7362	       sizeof(struct hci_ev_role_change)),
7363	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7364	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7365		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7366	/* [0x14 = HCI_EV_MODE_CHANGE] */
7367	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7368	       sizeof(struct hci_ev_mode_change)),
7369	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7370	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7371	       sizeof(struct hci_ev_pin_code_req)),
7372	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7373	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7374	       sizeof(struct hci_ev_link_key_req)),
7375	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7376	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7377	       sizeof(struct hci_ev_link_key_notify)),
7378	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7379	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7380	       sizeof(struct hci_ev_clock_offset)),
7381	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7382	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7383	       sizeof(struct hci_ev_pkt_type_change)),
7384	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7385	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7386	       sizeof(struct hci_ev_pscan_rep_mode)),
7387	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7388	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7389		  hci_inquiry_result_with_rssi_evt,
7390		  sizeof(struct hci_ev_inquiry_result_rssi),
7391		  HCI_MAX_EVENT_SIZE),
7392	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7393	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7394	       sizeof(struct hci_ev_remote_ext_features)),
7395	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7396	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7397	       sizeof(struct hci_ev_sync_conn_complete)),
7398	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7399	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7400		  hci_extended_inquiry_result_evt,
7401		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7402	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7403	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7404	       sizeof(struct hci_ev_key_refresh_complete)),
7405	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7406	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7407	       sizeof(struct hci_ev_io_capa_request)),
7408	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7409	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7410	       sizeof(struct hci_ev_io_capa_reply)),
7411	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7412	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7413	       sizeof(struct hci_ev_user_confirm_req)),
7414	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7415	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7416	       sizeof(struct hci_ev_user_passkey_req)),
7417	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7418	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7419	       sizeof(struct hci_ev_remote_oob_data_request)),
7420	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7421	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7422	       sizeof(struct hci_ev_simple_pair_complete)),
7423	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7424	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7425	       sizeof(struct hci_ev_user_passkey_notify)),
7426	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7427	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7428	       sizeof(struct hci_ev_keypress_notify)),
7429	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7430	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7431	       sizeof(struct hci_ev_remote_host_features)),
7432	/* [0x3e = HCI_EV_LE_META] */
7433	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7434		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7435	/* [0xff = HCI_EV_VENDOR] */
7436	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7437};
7438
7439static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7440			   u16 *opcode, u8 *status,
7441			   hci_req_complete_t *req_complete,
7442			   hci_req_complete_skb_t *req_complete_skb)
7443{
7444	const struct hci_ev *ev = &hci_ev_table[event];
7445	void *data;
7446
7447	if (!ev->func)
7448		return;
7449
7450	if (skb->len < ev->min_len) {
7451		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7452			   event, skb->len, ev->min_len);
7453		return;
7454	}
7455
7456	/* Just warn if the length is over max_len, since it may still be
7457	 * possible to partially parse the event, so leave it to the callback
7458	 * to decide whether that is acceptable.
7459	 */
7460	if (skb->len > ev->max_len)
7461		bt_dev_warn_ratelimited(hdev,
7462					"unexpected event 0x%2.2x length: %u > %u",
7463					event, skb->len, ev->max_len);
7464
7465	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7466	if (!data)
7467		return;
7468
7469	if (ev->req)
7470		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7471			     req_complete_skb);
7472	else
7473		ev->func(hdev, data, skb);
7474}
7475
7476void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7477{
7478	struct hci_event_hdr *hdr = (void *) skb->data;
7479	hci_req_complete_t req_complete = NULL;
7480	hci_req_complete_skb_t req_complete_skb = NULL;
7481	struct sk_buff *orig_skb = NULL;
7482	u8 status = 0, event, req_evt = 0;
7483	u16 opcode = HCI_OP_NOP;
7484
7485	if (skb->len < sizeof(*hdr)) {
7486		bt_dev_err(hdev, "Malformed HCI Event");
7487		goto done;
7488	}
7489
7490	kfree_skb(hdev->recv_event);
7491	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7492
7493	event = hdr->evt;
7494	if (!event) {
7495		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7496			    event);
7497		goto done;
7498	}
7499
7500	/* Only match the event if the command's OGF is not 0x08 (LE Controller) */
7501	if (hdev->req_skb &&
7502	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
7503	    hci_skb_event(hdev->req_skb) == event) {
7504		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
7505				     status, &req_complete, &req_complete_skb);
7506		req_evt = event;
7507	}
7508
7509	/* If it looks like we might end up having to call
7510	 * req_complete_skb, store a pristine copy of the skb since the
7511	 * various handlers may modify the original one through
7512	 * skb_pull() calls, etc.
7513	 */
7514	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7515	    event == HCI_EV_CMD_COMPLETE)
7516		orig_skb = skb_clone(skb, GFP_KERNEL);
7517
7518	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7519
7520	/* Store wake reason if we're suspended */
7521	hci_store_wake_reason(hdev, event, skb);
7522
7523	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7524
7525	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7526		       &req_complete_skb);
7527
7528	if (req_complete) {
7529		req_complete(hdev, status, opcode);
7530	} else if (req_complete_skb) {
7531		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7532			kfree_skb(orig_skb);
7533			orig_skb = NULL;
7534		}
7535		req_complete_skb(hdev, status, opcode, orig_skb);
7536	}
7537
7538done:
7539	kfree_skb(orig_skb);
7540	kfree_skb(skb);
7541	hdev->stat.evt_rx++;
7542}