Linux Audio

Check our new training course

Loading...
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI event handling. */
  26
  27#include <asm/unaligned.h>
  28
  29#include <net/bluetooth/bluetooth.h>
  30#include <net/bluetooth/hci_core.h>
  31#include <net/bluetooth/mgmt.h>
  32
  33#include "hci_request.h"
  34#include "hci_debugfs.h"
  35#include "a2mp.h"
  36#include "amp.h"
  37#include "smp.h"
  38
  39#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
  40		 "\x00\x00\x00\x00\x00\x00\x00\x00"
  41
  42/* Handle HCI Event packets */
  43
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clears the HCI_INQUIRY flag, wakes any waiters on that
 * bit and lets queued connection attempts proceed.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
  68
  69static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
  70{
  71	__u8 status = *((__u8 *) skb->data);
  72
  73	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  74
  75	if (status)
  76		return;
  77
  78	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
  79}
  80
  81static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
  82{
  83	__u8 status = *((__u8 *) skb->data);
  84
  85	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  86
  87	if (status)
  88		return;
  89
  90	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
  91
  92	hci_conn_check_pending(hdev);
  93}
  94
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Nothing to update here; only logged for debugging.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
 100
 101static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
 102{
 103	struct hci_rp_role_discovery *rp = (void *) skb->data;
 104	struct hci_conn *conn;
 105
 106	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 107
 108	if (rp->status)
 109		return;
 110
 111	hci_dev_lock(hdev);
 112
 113	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 114	if (conn)
 115		conn->role = rp->role;
 116
 117	hci_dev_unlock(hdev);
 118}
 119
 120static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 121{
 122	struct hci_rp_read_link_policy *rp = (void *) skb->data;
 123	struct hci_conn *conn;
 124
 125	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 126
 127	if (rp->status)
 128		return;
 129
 130	hci_dev_lock(hdev);
 131
 132	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 133	if (conn)
 134		conn->link_policy = __le16_to_cpu(rp->policy);
 135
 136	hci_dev_unlock(hdev);
 137}
 138
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The reply only carries the handle, so the policy value that was
 * written is recovered from the sent command; it sits right after the
 * 2-byte handle, hence the "sent + 2" offset.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* Policy field follows the 2-byte handle in the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
 162
 163static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
 164					struct sk_buff *skb)
 165{
 166	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
 167
 168	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 169
 170	if (rp->status)
 171		return;
 172
 173	hdev->link_policy = __le16_to_cpu(rp->policy);
 174}
 175
 176static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
 177					 struct sk_buff *skb)
 178{
 179	__u8 status = *((__u8 *) skb->data);
 180	void *sent;
 181
 182	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 183
 184	if (status)
 185		return;
 186
 187	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
 188	if (!sent)
 189		return;
 190
 191	hdev->link_policy = get_unaligned_le16(sent);
 192}
 193
/* Command Complete handler for HCI_Reset.
 *
 * HCI_RESET is cleared unconditionally so a failed reset does not
 * leave the flag stuck. On success, all host-side state that the
 * controller just forgot is reset to defaults as well.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The reset wiped the controller's white list too */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
 225
 226static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
 227					struct sk_buff *skb)
 228{
 229	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
 230	struct hci_cp_read_stored_link_key *sent;
 231
 232	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 233
 234	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
 235	if (!sent)
 236		return;
 237
 238	if (!rp->status && sent->read_all == 0x01) {
 239		hdev->stored_max_keys = rp->max_keys;
 240		hdev->stored_num_keys = rp->num_keys;
 241	}
 242}
 243
 244static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
 245					  struct sk_buff *skb)
 246{
 247	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
 248
 249	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 250
 251	if (rp->status)
 252		return;
 253
 254	if (rp->num_keys <= hdev->stored_num_keys)
 255		hdev->stored_num_keys -= rp->num_keys;
 256	else
 257		hdev->stored_num_keys = 0;
 258}
 259
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * The reply carries no data, so the name is taken from the command we
 * sent. With mgmt in use the result is always forwarded (even on
 * failure, so userspace sees the error); otherwise the cached name is
 * only updated on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
 280
 281static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 282{
 283	struct hci_rp_read_local_name *rp = (void *) skb->data;
 284
 285	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 286
 287	if (rp->status)
 288		return;
 289
 290	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 291	    hci_dev_test_flag(hdev, HCI_CONFIG))
 292		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 293}
 294
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success the HCI_AUTH flag is synced with the parameter that was
 * written. The result (including failures) is also forwarded to mgmt
 * so a pending mgmt command can complete.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
 322
 323static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 324{
 325	__u8 status = *((__u8 *) skb->data);
 326	__u8 param;
 327	void *sent;
 328
 329	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 330
 331	if (status)
 332		return;
 333
 334	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
 335	if (!sent)
 336		return;
 337
 338	param = *((__u8 *) sent);
 339
 340	if (param)
 341		set_bit(HCI_ENCRYPT, &hdev->flags);
 342	else
 343		clear_bit(HCI_ENCRYPT, &hdev->flags);
 344}
 345
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Syncs the HCI_ISCAN (inquiry scan / discoverable) and HCI_PSCAN
 * (page scan / connectable) flags with the scan mode that was written.
 * On failure only the discoverable timeout is reset.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
 380
/* Command Complete handler for HCI_Read_Class_of_Device. */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Class of Device is a 3-byte field */
	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
 395
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * On success the cached class is updated from the sent command; the
 * result is always forwarded to mgmt so a pending command completes.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
 417
 418static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
 419{
 420	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
 421	__u16 setting;
 422
 423	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 424
 425	if (rp->status)
 426		return;
 427
 428	setting = __le16_to_cpu(rp->voice_setting);
 429
 430	if (hdev->voice_setting == setting)
 431		return;
 432
 433	hdev->voice_setting = setting;
 434
 435	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
 436
 437	if (hdev->notify)
 438		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 439}
 440
 441static void hci_cc_write_voice_setting(struct hci_dev *hdev,
 442				       struct sk_buff *skb)
 443{
 444	__u8 status = *((__u8 *) skb->data);
 445	__u16 setting;
 446	void *sent;
 447
 448	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 449
 450	if (status)
 451		return;
 452
 453	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
 454	if (!sent)
 455		return;
 456
 457	setting = get_unaligned_le16(sent);
 458
 459	if (hdev->voice_setting == setting)
 460		return;
 461
 462	hdev->voice_setting = setting;
 463
 464	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
 465
 466	if (hdev->notify)
 467		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 468}
 469
 470static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
 471					  struct sk_buff *skb)
 472{
 473	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
 474
 475	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 476
 477	if (rp->status)
 478		return;
 479
 480	hdev->num_iac = rp->num_iac;
 481
 482	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
 483}
 484
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host feature bit (LMP_HOST_SSP) is synced with the
 * mode that was written. With mgmt in use the HCI_SSP_ENABLED flag is
 * managed through mgmt_ssp_enable_complete(); otherwise it is toggled
 * here directly (only on success).
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
 516
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success the host feature bit (LMP_HOST_SC) is synced with the
 * value that was written; when mgmt is not in use the HCI_SC_ENABLED
 * flag is updated here as well.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
 546
 547static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 548{
 549	struct hci_rp_read_local_version *rp = (void *) skb->data;
 550
 551	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 552
 553	if (rp->status)
 554		return;
 555
 556	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 557	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 558		hdev->hci_ver = rp->hci_ver;
 559		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
 560		hdev->lmp_ver = rp->lmp_ver;
 561		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
 562		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 563	}
 564}
 565
 566static void hci_cc_read_local_commands(struct hci_dev *hdev,
 567				       struct sk_buff *skb)
 568{
 569	struct hci_rp_read_local_commands *rp = (void *) skb->data;
 570
 571	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 572
 573	if (rp->status)
 574		return;
 575
 576	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 577	    hci_dev_test_flag(hdev, HCI_CONFIG))
 578		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 579}
 580
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the page-0 feature mask and derives the supported ACL packet
 * types and (e)SCO air modes from the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
 630
 631static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
 632					   struct sk_buff *skb)
 633{
 634	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
 635
 636	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 637
 638	if (rp->status)
 639		return;
 640
 641	if (hdev->max_page < rp->max_page)
 642		hdev->max_page = rp->max_page;
 643
 644	if (rp->page < HCI_MAX_PAGES)
 645		memcpy(hdev->features[rp->page], rp->features, 8);
 646}
 647
 648static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
 649					  struct sk_buff *skb)
 650{
 651	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
 652
 653	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 654
 655	if (rp->status)
 656		return;
 657
 658	hdev->flow_ctl_mode = rp->mode;
 659}
 660
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Initializes the ACL/SCO MTUs and packet counts that are used for
 * flow control towards the controller.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirk for controllers whose reported SCO buffer values are
	 * not usable: override with fixed defaults.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
 686
/* Command Complete handler for HCI_Read_BD_ADDR. */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Only adopt the address while the device is initializing */
	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	/* During setup also keep a copy in setup_addr */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
 702
 703static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
 704					   struct sk_buff *skb)
 705{
 706	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
 707
 708	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 709
 710	if (rp->status)
 711		return;
 712
 713	if (test_bit(HCI_INIT, &hdev->flags)) {
 714		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
 715		hdev->page_scan_window = __le16_to_cpu(rp->window);
 716	}
 717}
 718
 719static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
 720					    struct sk_buff *skb)
 721{
 722	u8 status = *((u8 *) skb->data);
 723	struct hci_cp_write_page_scan_activity *sent;
 724
 725	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 726
 727	if (status)
 728		return;
 729
 730	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
 731	if (!sent)
 732		return;
 733
 734	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
 735	hdev->page_scan_window = __le16_to_cpu(sent->window);
 736}
 737
 738static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
 739					   struct sk_buff *skb)
 740{
 741	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
 742
 743	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 744
 745	if (rp->status)
 746		return;
 747
 748	if (test_bit(HCI_INIT, &hdev->flags))
 749		hdev->page_scan_type = rp->type;
 750}
 751
 752static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
 753					struct sk_buff *skb)
 754{
 755	u8 status = *((u8 *) skb->data);
 756	u8 *type;
 757
 758	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 759
 760	if (status)
 761		return;
 762
 763	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
 764	if (type)
 765		hdev->page_scan_type = *type;
 766}
 767
 768static void hci_cc_read_data_block_size(struct hci_dev *hdev,
 769					struct sk_buff *skb)
 770{
 771	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
 772
 773	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 774
 775	if (rp->status)
 776		return;
 777
 778	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
 779	hdev->block_len = __le16_to_cpu(rp->block_len);
 780	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
 781
 782	hdev->block_cnt = hdev->num_blocks;
 783
 784	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
 785	       hdev->block_cnt, hdev->block_len);
 786}
 787
/* Command Complete handler for HCI_Read_Clock.
 *
 * Depending on the "which" parameter of the sent command, the result
 * is either the local clock (stored on hdev) or a piconet clock
 * (stored on the matching connection together with its accuracy).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated replies before reading the fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* 0x00 requests the local Bluetooth clock */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
 822
/* Command Complete handler for HCI_Read_Local_AMP_Info: cache the AMP
 * controller capabilities (bandwidth, latency, PDU size, assoc size
 * and flush timeouts) reported by the controller.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
 844
 845static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
 846					 struct sk_buff *skb)
 847{
 848	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
 849
 850	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 851
 852	if (rp->status)
 853		return;
 854
 855	hdev->inq_tx_power = rp->tx_power;
 856}
 857
/* Command Complete handler for HCI_PIN_Code_Reply.
 *
 * Completes a pending mgmt request (also on failure) and, on success,
 * records the PIN length on the matching connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* The PIN length comes from the command we sent, not the reply */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
 885
 886static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 887{
 888	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
 889
 890	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 891
 892	hci_dev_lock(hdev);
 893
 894	if (hci_dev_test_flag(hdev, HCI_MGMT))
 895		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
 896						 rp->status);
 897
 898	hci_dev_unlock(hdev);
 899}
 900
 901static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
 902				       struct sk_buff *skb)
 903{
 904	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
 905
 906	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 907
 908	if (rp->status)
 909		return;
 910
 911	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
 912	hdev->le_pkts = rp->le_max_pkt;
 913
 914	hdev->le_cnt = hdev->le_pkts;
 915
 916	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
 917}
 918
 919static void hci_cc_le_read_local_features(struct hci_dev *hdev,
 920					  struct sk_buff *skb)
 921{
 922	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
 923
 924	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 925
 926	if (rp->status)
 927		return;
 928
 929	memcpy(hdev->le_features, rp->features, 8);
 930}
 931
 932static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
 933					struct sk_buff *skb)
 934{
 935	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
 936
 937	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 938
 939	if (rp->status)
 940		return;
 941
 942	hdev->adv_tx_power = rp->tx_power;
 943}
 944
 945static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 946{
 947	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 948
 949	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 950
 951	hci_dev_lock(hdev);
 952
 953	if (hci_dev_test_flag(hdev, HCI_MGMT))
 954		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
 955						 rp->status);
 956
 957	hci_dev_unlock(hdev);
 958}
 959
 960static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 961					  struct sk_buff *skb)
 962{
 963	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 964
 965	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 966
 967	hci_dev_lock(hdev);
 968
 969	if (hci_dev_test_flag(hdev, HCI_MGMT))
 970		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
 971						     ACL_LINK, 0, rp->status);
 972
 973	hci_dev_unlock(hdev);
 974}
 975
 976static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
 977{
 978	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 979
 980	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 981
 982	hci_dev_lock(hdev);
 983
 984	if (hci_dev_test_flag(hdev, HCI_MGMT))
 985		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
 986						 0, rp->status);
 987
 988	hci_dev_unlock(hdev);
 989}
 990
 991static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
 992					  struct sk_buff *skb)
 993{
 994	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 995
 996	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 997
 998	hci_dev_lock(hdev);
 999
1000	if (hci_dev_test_flag(hdev, HCI_MGMT))
1001		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1002						     ACL_LINK, 0, rp->status);
1003
1004	hci_dev_unlock(hdev);
1005}
1006
/* Command Complete handler for HCI_Read_Local_OOB_Data. Nothing is
 * cached here; only the status is logged.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1014
/* Command Complete handler for HCI_Read_Local_OOB_Extended_Data.
 * Nothing is cached here; only the status is logged.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1022
1023static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1024{
1025	__u8 status = *((__u8 *) skb->data);
1026	bdaddr_t *sent;
1027
1028	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1029
1030	if (status)
1031		return;
1032
1033	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1034	if (!sent)
1035		return;
1036
1037	hci_dev_lock(hdev);
1038
1039	bacpy(&hdev->random_addr, sent);
1040
1041	hci_dev_unlock(hdev);
1042}
1043
/* Command Complete handler for HCI_LE_Set_Advertising_Enable.
 *
 * Syncs the HCI_LE_ADV flag with the value that was written. When
 * advertising was enabled while an LE connection attempt is pending,
 * a connection timeout is armed so a failed attempt cannot hang.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1078
1079static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1080{
1081	struct hci_cp_le_set_scan_param *cp;
1082	__u8 status = *((__u8 *) skb->data);
1083
1084	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1085
1086	if (status)
1087		return;
1088
1089	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1090	if (!cp)
1091		return;
1092
1093	hci_dev_lock(hdev);
1094
1095	hdev->le_scan_type = cp->type;
1096
1097	hci_dev_unlock(hdev);
1098}
1099
1100static bool has_pending_adv_report(struct hci_dev *hdev)
1101{
1102	struct discovery_state *d = &hdev->discovery;
1103
1104	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1105}
1106
1107static void clear_pending_adv_report(struct hci_dev *hdev)
1108{
1109	struct discovery_state *d = &hdev->discovery;
1110
1111	bacpy(&d->last_adv_addr, BDADDR_ANY);
1112	d->last_adv_data_len = 0;
1113}
1114
1115static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1116				     u8 bdaddr_type, s8 rssi, u32 flags,
1117				     u8 *data, u8 len)
1118{
1119	struct discovery_state *d = &hdev->discovery;
1120
1121	bacpy(&d->last_adv_addr, bdaddr);
1122	d->last_adv_addr_type = bdaddr_type;
1123	d->last_adv_rssi = rssi;
1124	d->last_adv_flags = flags;
1125	memcpy(d->last_adv_data, data, len);
1126	d->last_adv_data_len = len;
1127}
1128
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Syncs the HCI_LE_SCAN flag with the value that was written. On
 * disable, flushes any pending advertising report to mgmt and sorts
 * out discovery/advertising state.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans defer reports until the scan response
		 * arrives, so drop anything stale from a previous scan.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1197
1198static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1199					   struct sk_buff *skb)
1200{
1201	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1202
1203	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1204
1205	if (rp->status)
1206		return;
1207
1208	hdev->le_white_list_size = rp->size;
1209}
1210
1211static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1212				       struct sk_buff *skb)
1213{
1214	__u8 status = *((__u8 *) skb->data);
1215
1216	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1217
1218	if (status)
1219		return;
1220
1221	hci_bdaddr_list_clear(&hdev->le_white_list);
1222}
1223
1224static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1225					struct sk_buff *skb)
1226{
1227	struct hci_cp_le_add_to_white_list *sent;
1228	__u8 status = *((__u8 *) skb->data);
1229
1230	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1231
1232	if (status)
1233		return;
1234
1235	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1236	if (!sent)
1237		return;
1238
1239	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1240			   sent->bdaddr_type);
1241}
1242
1243static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1244					  struct sk_buff *skb)
1245{
1246	struct hci_cp_le_del_from_white_list *sent;
1247	__u8 status = *((__u8 *) skb->data);
1248
1249	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1250
1251	if (status)
1252		return;
1253
1254	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1255	if (!sent)
1256		return;
1257
1258	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1259			    sent->bdaddr_type);
1260}
1261
1262static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1263					    struct sk_buff *skb)
1264{
1265	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1266
1267	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1268
1269	if (rp->status)
1270		return;
1271
1272	memcpy(hdev->le_states, rp->le_states, 8);
1273}
1274
1275static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1276					struct sk_buff *skb)
1277{
1278	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1279
1280	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1281
1282	if (rp->status)
1283		return;
1284
1285	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1286	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1287}
1288
1289static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1290					 struct sk_buff *skb)
1291{
1292	struct hci_cp_le_write_def_data_len *sent;
1293	__u8 status = *((__u8 *) skb->data);
1294
1295	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1296
1297	if (status)
1298		return;
1299
1300	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1301	if (!sent)
1302		return;
1303
1304	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1305	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1306}
1307
1308static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1309					struct sk_buff *skb)
1310{
1311	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1312
1313	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1314
1315	if (rp->status)
1316		return;
1317
1318	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1319	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1320	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1321	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1322}
1323
/* Command complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Mirrors the LE and Simultaneous LE+BR/EDR host support bits we just
 * wrote into the cached host features (features[1][0]) and keeps the
 * related device flags consistent.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters we sent to know what was written */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* With LE disabled, advertising cannot stay enabled either */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1357
1358static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1359{
1360	struct hci_cp_le_set_adv_param *cp;
1361	u8 status = *((u8 *) skb->data);
1362
1363	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364
1365	if (status)
1366		return;
1367
1368	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1369	if (!cp)
1370		return;
1371
1372	hci_dev_lock(hdev);
1373	hdev->adv_addr_type = cp->own_address_type;
1374	hci_dev_unlock(hdev);
1375}
1376
1377static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1378{
1379	struct hci_rp_read_rssi *rp = (void *) skb->data;
1380	struct hci_conn *conn;
1381
1382	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1383
1384	if (rp->status)
1385		return;
1386
1387	hci_dev_lock(hdev);
1388
1389	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1390	if (conn)
1391		conn->rssi = rp->rssi;
1392
1393	hci_dev_unlock(hdev);
1394}
1395
/* Command complete handler for HCI_OP_READ_TX_POWER.
 *
 * Stores the reported transmit power level on the connection the
 * command was issued for. The sent command's type field selects which
 * field is updated: 0x00 stores into tx_power, 0x01 into max_tx_power
 * (per the HCI Read Transmit Power Level command definition).
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Need the sent parameters to know which power level was requested */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1429
1430static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1431{
1432	u8 status = *((u8 *) skb->data);
1433	u8 *mode;
1434
1435	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1436
1437	if (status)
1438		return;
1439
1440	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1441	if (mode)
1442		hdev->ssp_debug_mode = *mode;
1443}
1444
1445static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1446{
1447	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1448
1449	if (status) {
1450		hci_conn_check_pending(hdev);
1451		return;
1452	}
1453
1454	set_bit(HCI_INQUIRY, &hdev->flags);
1455}
1456
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tears down or retries the matching connection object; on
 * success, makes sure a connection object exists for the pending ACL
 * link so the later Connection Complete event has something to update.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c is "Command Disallowed"; for that
			 * case keep the connection around for a limited
			 * number of retry attempts instead of failing it
			 * immediately.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1494
/* Command status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling here: the SCO connection hanging off the
 * referenced ACL link is closed and its owner notified.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command references the ACL handle the SCO link was added to */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1529
1530static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1531{
1532	struct hci_cp_auth_requested *cp;
1533	struct hci_conn *conn;
1534
1535	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1536
1537	if (!status)
1538		return;
1539
1540	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1541	if (!cp)
1542		return;
1543
1544	hci_dev_lock(hdev);
1545
1546	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1547	if (conn) {
1548		if (conn->state == BT_CONFIG) {
1549			hci_connect_cfm(conn, status);
1550			hci_conn_drop(conn);
1551		}
1552	}
1553
1554	hci_dev_unlock(hdev);
1555}
1556
1557static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1558{
1559	struct hci_cp_set_conn_encrypt *cp;
1560	struct hci_conn *conn;
1561
1562	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1563
1564	if (!status)
1565		return;
1566
1567	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1568	if (!cp)
1569		return;
1570
1571	hci_dev_lock(hdev);
1572
1573	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1574	if (conn) {
1575		if (conn->state == BT_CONFIG) {
1576			hci_connect_cfm(conn, status);
1577			hci_conn_drop(conn);
1578		}
1579	}
1580
1581	hci_dev_unlock(hdev);
1582}
1583
1584static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1585				    struct hci_conn *conn)
1586{
1587	if (conn->state != BT_CONFIG || !conn->out)
1588		return 0;
1589
1590	if (conn->pending_sec_level == BT_SECURITY_SDP)
1591		return 0;
1592
1593	/* Only request authentication for SSP connections or non-SSP
1594	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1595	 * is requested.
1596	 */
1597	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1598	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1599	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1600	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1601		return 0;
1602
1603	return 1;
1604}
1605
1606static int hci_resolve_name(struct hci_dev *hdev,
1607				   struct inquiry_entry *e)
1608{
1609	struct hci_cp_remote_name_req cp;
1610
1611	memset(&cp, 0, sizeof(cp));
1612
1613	bacpy(&cp.bdaddr, &e->data.bdaddr);
1614	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1615	cp.pscan_mode = e->data.pscan_mode;
1616	cp.clock_offset = e->data.clock_offset;
1617
1618	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1619}
1620
1621static bool hci_resolve_next_name(struct hci_dev *hdev)
1622{
1623	struct discovery_state *discov = &hdev->discovery;
1624	struct inquiry_entry *e;
1625
1626	if (list_empty(&discov->resolve))
1627		return false;
1628
1629	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1630	if (!e)
1631		return false;
1632
1633	if (hci_resolve_name(hdev, e) == 0) {
1634		e->name_state = NAME_PENDING;
1635		return true;
1636	}
1637
1638	return false;
1639}
1640
/* Process the result of a remote name request during discovery.
 *
 * Reports the name (or lack of one, when name is NULL) for bdaddr to
 * mgmt, advances to the next pending name resolution, and moves the
 * discovery state machine to DISCOVERY_STOPPED when no work remains.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: stay in DISCOVERY_RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1689
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failure status is processed here: the pending name resolution
 * is reported as failed to mgmt, and if an outgoing connection on the
 * same address still needs authentication, that is kicked off now since
 * no Remote Name Request Complete event will arrive to do it.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (name == NULL) to mgmt */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1732
1733static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1734{
1735	struct hci_cp_read_remote_features *cp;
1736	struct hci_conn *conn;
1737
1738	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1739
1740	if (!status)
1741		return;
1742
1743	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1744	if (!cp)
1745		return;
1746
1747	hci_dev_lock(hdev);
1748
1749	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1750	if (conn) {
1751		if (conn->state == BT_CONFIG) {
1752			hci_connect_cfm(conn, status);
1753			hci_conn_drop(conn);
1754		}
1755	}
1756
1757	hci_dev_unlock(hdev);
1758}
1759
1760static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1761{
1762	struct hci_cp_read_remote_ext_features *cp;
1763	struct hci_conn *conn;
1764
1765	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1766
1767	if (!status)
1768		return;
1769
1770	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1771	if (!cp)
1772		return;
1773
1774	hci_dev_lock(hdev);
1775
1776	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1777	if (conn) {
1778		if (conn->state == BT_CONFIG) {
1779			hci_connect_cfm(conn, status);
1780			hci_conn_drop(conn);
1781		}
1782	}
1783
1784	hci_dev_unlock(hdev);
1785}
1786
1787static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1788{
1789	struct hci_cp_setup_sync_conn *cp;
1790	struct hci_conn *acl, *sco;
1791	__u16 handle;
1792
1793	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1794
1795	if (!status)
1796		return;
1797
1798	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1799	if (!cp)
1800		return;
1801
1802	handle = __le16_to_cpu(cp->handle);
1803
1804	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1805
1806	hci_dev_lock(hdev);
1807
1808	acl = hci_conn_hash_lookup_handle(hdev, handle);
1809	if (acl) {
1810		sco = acl->link;
1811		if (sco) {
1812			sco->state = BT_CLOSED;
1813
1814			hci_connect_cfm(sco, status);
1815			hci_conn_del(sco);
1816		}
1817	}
1818
1819	hci_dev_unlock(hdev);
1820}
1821
1822static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1823{
1824	struct hci_cp_sniff_mode *cp;
1825	struct hci_conn *conn;
1826
1827	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1828
1829	if (!status)
1830		return;
1831
1832	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1833	if (!cp)
1834		return;
1835
1836	hci_dev_lock(hdev);
1837
1838	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1839	if (conn) {
1840		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1841
1842		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1843			hci_sco_setup(conn, status);
1844	}
1845
1846	hci_dev_unlock(hdev);
1847}
1848
1849static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1850{
1851	struct hci_cp_exit_sniff_mode *cp;
1852	struct hci_conn *conn;
1853
1854	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1855
1856	if (!status)
1857		return;
1858
1859	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1860	if (!cp)
1861		return;
1862
1863	hci_dev_lock(hdev);
1864
1865	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1866	if (conn) {
1867		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1868
1869		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1870			hci_sco_setup(conn, status);
1871	}
1872
1873	hci_dev_unlock(hdev);
1874}
1875
1876static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1877{
1878	struct hci_cp_disconnect *cp;
1879	struct hci_conn *conn;
1880
1881	if (!status)
1882		return;
1883
1884	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1885	if (!cp)
1886		return;
1887
1888	hci_dev_lock(hdev);
1889
1890	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1891	if (conn)
1892		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1893				       conn->dst_type, status);
1894
1895	hci_dev_unlock(hdev);
1896}
1897
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, records on the connection object the initiator/responder
 * address information needed later by SMP, and arms a connection
 * attempt timeout for directed (non-white-list) connects.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
				       cp->peer_addr_type);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1949
1950static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1951{
1952	struct hci_cp_le_read_remote_features *cp;
1953	struct hci_conn *conn;
1954
1955	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1956
1957	if (!status)
1958		return;
1959
1960	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1961	if (!cp)
1962		return;
1963
1964	hci_dev_lock(hdev);
1965
1966	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1967	if (conn) {
1968		if (conn->state == BT_CONFIG) {
1969			hci_connect_cfm(conn, status);
1970			hci_conn_drop(conn);
1971		}
1972	}
1973
1974	hci_dev_unlock(hdev);
1975}
1976
/* Command status handler for HCI_OP_LE_START_ENC.
 *
 * A failure to even start encryption on an established LE link is
 * treated as an authentication failure: the link is disconnected.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only act on still-established links */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2006
2007static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2008{
2009	struct hci_cp_switch_role *cp;
2010	struct hci_conn *conn;
2011
2012	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2013
2014	if (!status)
2015		return;
2016
2017	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2018	if (!cp)
2019		return;
2020
2021	hci_dev_lock(hdev);
2022
2023	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2024	if (conn)
2025		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2026
2027	hci_dev_unlock(hdev);
2028}
2029
2030static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2031{
2032	__u8 status = *((__u8 *) skb->data);
2033	struct discovery_state *discov = &hdev->discovery;
2034	struct inquiry_entry *e;
2035
2036	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037
2038	hci_conn_check_pending(hdev);
2039
2040	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2041		return;
2042
2043	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2044	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2045
2046	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2047		return;
2048
2049	hci_dev_lock(hdev);
2050
2051	if (discov->state != DISCOVERY_FINDING)
2052		goto unlock;
2053
2054	if (list_empty(&discov->resolve)) {
2055		/* When BR/EDR inquiry is active and no LE scanning is in
2056		 * progress, then change discovery state to indicate completion.
2057		 *
2058		 * When running LE scanning and BR/EDR inquiry simultaneously
2059		 * and the LE scan already finished, then change the discovery
2060		 * state to indicate completion.
2061		 */
2062		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2063		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2064			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2065		goto unlock;
2066	}
2067
2068	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2069	if (e && hci_resolve_name(hdev, e) == 0) {
2070		e->name_state = NAME_PENDING;
2071		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2072	} else {
2073		/* When BR/EDR inquiry is active and no LE scanning is in
2074		 * progress, then change discovery state to indicate completion.
2075		 *
2076		 * When running LE scanning and BR/EDR inquiry simultaneously
2077		 * and the LE scan already finished, then change the discovery
2078		 * state to indicate completion.
2079		 */
2080		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2081		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2082			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2083	}
2084
2085unlock:
2086	hci_dev_unlock(hdev);
2087}
2088
2089static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2090{
2091	struct inquiry_data data;
2092	struct inquiry_info *info = (void *) (skb->data + 1);
2093	int num_rsp = *((__u8 *) skb->data);
2094
2095	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2096
2097	if (!num_rsp)
2098		return;
2099
2100	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2101		return;
2102
2103	hci_dev_lock(hdev);
2104
2105	for (; num_rsp; num_rsp--, info++) {
2106		u32 flags;
2107
2108		bacpy(&data.bdaddr, &info->bdaddr);
2109		data.pscan_rep_mode	= info->pscan_rep_mode;
2110		data.pscan_period_mode	= info->pscan_period_mode;
2111		data.pscan_mode		= info->pscan_mode;
2112		memcpy(data.dev_class, info->dev_class, 3);
2113		data.clock_offset	= info->clock_offset;
2114		data.rssi		= HCI_RSSI_INVALID;
2115		data.ssp_mode		= 0x00;
2116
2117		flags = hci_inquiry_cache_update(hdev, &data, false);
2118
2119		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2120				  info->dev_class, HCI_RSSI_INVALID,
2121				  flags, NULL, 0, NULL, 0);
2122	}
2123
2124	hci_dev_unlock(hdev);
2125}
2126
/* Handler for the HCI Connection Complete event.
 *
 * Finalizes the setup of a BR/EDR ACL or (e)SCO connection: records
 * the handle, moves the connection into the proper state, propagates
 * auth/encrypt flags, kicks off remote feature discovery for ACL
 * links, and notifies the upper layers. On failure the connection
 * object is torn down.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may complete as plain SCO; retry the
		 * lookup under ESCO_LINK and convert the object.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming non-SSP connections without a stored
			 * link key get the shorter pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* A slot may have freed up for a queued connection attempt */
	hci_conn_check_pending(hdev);
}
2211
2212static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2213{
2214	struct hci_cp_reject_conn_req cp;
2215
2216	bacpy(&cp.bdaddr, bdaddr);
2217	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2218	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2219}
2220
/* Handler for the HCI Connection Request event.
 *
 * Decides whether to accept or reject an incoming BR/EDR connection
 * based on the link policy, the blacklist, and (under mgmt) the
 * connectable state / whitelist. Accepted requests either get an
 * Accept Connection Request (ACL), an Accept Synchronous Connection
 * Request (SCO/eSCO), or are deferred to the upper layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event data */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: upper layer decides whether to accept */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2315
2316static u8 hci_to_mgmt_reason(u8 err)
2317{
2318	switch (err) {
2319	case HCI_ERROR_CONNECTION_TIMEOUT:
2320		return MGMT_DEV_DISCONN_TIMEOUT;
2321	case HCI_ERROR_REMOTE_USER_TERM:
2322	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2323	case HCI_ERROR_REMOTE_POWER_OFF:
2324		return MGMT_DEV_DISCONN_REMOTE;
2325	case HCI_ERROR_LOCAL_HOST_TERM:
2326		return MGMT_DEV_DISCONN_LOCAL_HOST;
2327	default:
2328		return MGMT_DEV_DISCONN_UNKNOWN;
2329	}
2330}
2331
/* Handler for the HCI Disconnection Complete event.
 *
 * Tears down the connection object, notifies mgmt and the upper
 * layers, re-arms background scanning for auto-connect LE devices,
 * and re-enables advertising if the disconnected link was LE.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if it knew about the
	 * connection in the first place.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-arm auto-connection for this device where configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed below; remember the link type for the LE check */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2409
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's security state on success and, depending
 * on the connection state, either continues with link encryption or
 * completes the pending connect/auth callbacks.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (pre-SSP) devices cannot be re-authenticated on an
		 * existing link, so don't upgrade the security level then.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links encryption is mandatory, so request it before
		 * declaring the connection fully established.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold the connection for the disconnect timeout window */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested while authentication was pending,
	 * issue it now (or report the failure).
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2474
/* Handle the HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name to the management interface (when mgmt
 * is enabled) and then initiates outgoing authentication if it is
 * still required for the connection.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* On failure, report a NULL name so pending requests complete */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already in progress */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2516
/* Completion callback for HCI_Read_Encryption_Key_Size, issued from
 * hci_encrypt_change_evt(). Records the negotiated encryption key
 * size (falling back to the maximum on error) and then delivers the
 * connect/encrypt notifications that were deferred until the key
 * size was known.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	/* Sanity-check the response before touching its fields */
	if (!skb || skb->len < sizeof(*rp)) {
		BT_ERR("%s invalid HCI Read Encryption Key Size response",
		       hdev->name);
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported.
	 */
	if (rp->status) {
		BT_ERR("%s failed to read key size for handle %u", hdev->name,
		       handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		u8 encrypt;

		/* Derive the encrypt value to report: off, AES-CCM or E0 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2573
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags, enforces
 * Secure Connections Only policy, and for encrypted ACL links reads
 * the actual encryption key size before notifying upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is treated
	 * as an authentication failure: tear the link down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			BT_ERR("Sending HCI Read Encryption Key Size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification happens in read_enc_key_size_complete() */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2674
2675static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2676					     struct sk_buff *skb)
2677{
2678	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2679	struct hci_conn *conn;
2680
2681	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2682
2683	hci_dev_lock(hdev);
2684
2685	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2686	if (conn) {
2687		if (!ev->status)
2688			set_bit(HCI_CONN_SECURE, &conn->flags);
2689
2690		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2691
2692		hci_key_change_cfm(conn, ev->status);
2693	}
2694
2695	hci_dev_unlock(hdev);
2696}
2697
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0 and, while the connection is
 * still being configured, continues the setup sequence: extended
 * features, remote name request and finally connection completion.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 before
	 * continuing; the ext-features handler resumes the sequence.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name once per connection, otherwise tell
	 * mgmt about the new connection right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2746
/* Handle the HCI Command Complete event.
 *
 * Dispatches to the matching hci_cc_* handler based on the completed
 * opcode, re-arms the command timer and credit accounting, and hands
 * the result to the hci_request completion machinery. The opcode and
 * status are returned to the caller via @opcode and @status.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = skb->data[sizeof(*ev)];

	/* Strip the event header so handlers see the return parameters */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any real command completion proves the controller is alive,
	 * so the command timeout can be cancelled.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Restore a command credit unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Kick the command queue if more commands are waiting */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3045
/* Handle the HCI Command Status event.
 *
 * Dispatches to the matching hci_cs_* handler based on the opcode,
 * re-arms the command timer and credit accounting, and flags the
 * request as complete where appropriate. The opcode and status are
 * returned to the caller via @opcode and @status.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any real command status proves the controller is alive, so
	 * the command timeout can be cancelled.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Restore a command credit unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Kick the command queue if more commands are waiting */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3148
/* Handle the HCI Hardware Error event.
 *
 * Record the controller-supplied error code and schedule the error
 * reset work to recover the device.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
3157
3158static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3159{
3160	struct hci_ev_role_change *ev = (void *) skb->data;
3161	struct hci_conn *conn;
3162
3163	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3164
3165	hci_dev_lock(hdev);
3166
3167	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3168	if (conn) {
3169		if (!ev->status)
3170			conn->role = ev->role;
3171
3172		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3173
3174		hci_role_switch_cfm(conn, ev->status, ev->role);
3175	}
3176
3177	hci_dev_unlock(hdev);
3178}
3179
/* Handle the HCI Number Of Completed Packets event.
 *
 * Returns transmit credits to the per-link-type flow-control
 * counters for each reported handle and restarts TX processing.
 * Only valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the event actually contains num_hndl entries
	 * (num_hndl is only read after the header length check passes).
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits, capped at the controller's total */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL buffer pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3245
3246static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3247						 __u16 handle)
3248{
3249	struct hci_chan *chan;
3250
3251	switch (hdev->dev_type) {
3252	case HCI_BREDR:
3253		return hci_conn_hash_lookup_handle(hdev, handle);
3254	case HCI_AMP:
3255		chan = hci_chan_lookup_handle(hdev, handle);
3256		if (chan)
3257			return chan->conn;
3258		break;
3259	default:
3260		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3261		break;
3262	}
3263
3264	return NULL;
3265}
3266
/* Handle the HCI Number Of Completed Data Blocks event.
 *
 * Returns data-block credits to the shared block pool for each
 * reported handle and restarts TX processing. Only valid in
 * block-based flow control mode (AMP controllers).
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the event actually contains num_hndl entries
	 * (num_hndl is only read after the header length check passes).
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Return the blocks, capped at the controller's total */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3316
3317static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3318{
3319	struct hci_ev_mode_change *ev = (void *) skb->data;
3320	struct hci_conn *conn;
3321
3322	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3323
3324	hci_dev_lock(hdev);
3325
3326	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3327	if (conn) {
3328		conn->mode = ev->mode;
3329
3330		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3331					&conn->flags)) {
3332			if (conn->mode == HCI_CM_ACTIVE)
3333				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3334			else
3335				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3336		}
3337
3338		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3339			hci_sco_setup(conn, ev->status);
3340	}
3341
3342	hci_dev_unlock(hdev);
3343}
3344
3345static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3346{
3347	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3348	struct hci_conn *conn;
3349
3350	BT_DBG("%s", hdev->name);
3351
3352	hci_dev_lock(hdev);
3353
3354	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3355	if (!conn)
3356		goto unlock;
3357
3358	if (conn->state == BT_CONNECTED) {
3359		hci_conn_hold(conn);
3360		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3361		hci_conn_drop(conn);
3362	}
3363
3364	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3365	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3366		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3367			     sizeof(ev->bdaddr), &ev->bdaddr);
3368	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3369		u8 secure;
3370
3371		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3372			secure = 1;
3373		else
3374			secure = 0;
3375
3376		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3377	}
3378
3379unlock:
3380	hci_dev_unlock(hdev);
3381}
3382
3383static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3384{
3385	if (key_type == HCI_LK_CHANGED_COMBINATION)
3386		return;
3387
3388	conn->pin_length = pin_len;
3389	conn->key_type = key_type;
3390
3391	switch (key_type) {
3392	case HCI_LK_LOCAL_UNIT:
3393	case HCI_LK_REMOTE_UNIT:
3394	case HCI_LK_DEBUG_COMBINATION:
3395		return;
3396	case HCI_LK_COMBINATION:
3397		if (pin_len == 16)
3398			conn->pending_sec_level = BT_SECURITY_HIGH;
3399		else
3400			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3401		break;
3402	case HCI_LK_UNAUTH_COMBINATION_P192:
3403	case HCI_LK_UNAUTH_COMBINATION_P256:
3404		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3405		break;
3406	case HCI_LK_AUTH_COMBINATION_P192:
3407		conn->pending_sec_level = BT_SECURITY_HIGH;
3408		break;
3409	case HCI_LK_AUTH_COMBINATION_P256:
3410		conn->pending_sec_level = BT_SECURITY_FIPS;
3411		break;
3412	}
3413}
3414
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the remote device and replies with
 * it if it satisfies the connection's pending security level;
 * otherwise sends a negative reply so pairing is (re)initiated.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject unauthenticated keys when MITM protection is
		 * required by the requested authentication type.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is not strong enough
		 * for high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3474
/* Handle the HCI Link Key Notification event.
 *
 * Stores the newly created link key, updates the connection's key
 * information and notifies userspace. Debug keys are discarded from
 * the kernel list unless explicitly kept.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold the connection for the disconnect timeout window */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here while conn->pin_length
	 * was used for conn_set_key() above — confirm this asymmetry is
	 * intentional.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the link goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3534
3535static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3536{
3537	struct hci_ev_clock_offset *ev = (void *) skb->data;
3538	struct hci_conn *conn;
3539
3540	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3541
3542	hci_dev_lock(hdev);
3543
3544	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3545	if (conn && !ev->status) {
3546		struct inquiry_entry *ie;
3547
3548		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3549		if (ie) {
3550			ie->data.clock_offset = ev->clock_offset;
3551			ie->timestamp = jiffies;
3552		}
3553	}
3554
3555	hci_dev_unlock(hdev);
3556}
3557
3558static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3559{
3560	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3561	struct hci_conn *conn;
3562
3563	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3564
3565	hci_dev_lock(hdev);
3566
3567	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3568	if (conn && !ev->status)
3569		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3570
3571	hci_dev_unlock(hdev);
3572}
3573
3574static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3575{
3576	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3577	struct inquiry_entry *ie;
3578
3579	BT_DBG("%s", hdev->name);
3580
3581	hci_dev_lock(hdev);
3582
3583	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3584	if (ie) {
3585		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3586		ie->timestamp = jiffies;
3587	}
3588
3589	hci_dev_unlock(hdev);
3590}
3591
3592static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3593					     struct sk_buff *skb)
3594{
3595	struct inquiry_data data;
3596	int num_rsp = *((__u8 *) skb->data);
3597
3598	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3599
3600	if (!num_rsp)
3601		return;
3602
3603	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3604		return;
3605
3606	hci_dev_lock(hdev);
3607
3608	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3609		struct inquiry_info_with_rssi_and_pscan_mode *info;
3610		info = (void *) (skb->data + 1);
3611
3612		for (; num_rsp; num_rsp--, info++) {
3613			u32 flags;
3614
3615			bacpy(&data.bdaddr, &info->bdaddr);
3616			data.pscan_rep_mode	= info->pscan_rep_mode;
3617			data.pscan_period_mode	= info->pscan_period_mode;
3618			data.pscan_mode		= info->pscan_mode;
3619			memcpy(data.dev_class, info->dev_class, 3);
3620			data.clock_offset	= info->clock_offset;
3621			data.rssi		= info->rssi;
3622			data.ssp_mode		= 0x00;
3623
3624			flags = hci_inquiry_cache_update(hdev, &data, false);
3625
3626			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3627					  info->dev_class, info->rssi,
3628					  flags, NULL, 0, NULL, 0);
3629		}
3630	} else {
3631		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3632
3633		for (; num_rsp; num_rsp--, info++) {
3634			u32 flags;
3635
3636			bacpy(&data.bdaddr, &info->bdaddr);
3637			data.pscan_rep_mode	= info->pscan_rep_mode;
3638			data.pscan_period_mode	= info->pscan_period_mode;
3639			data.pscan_mode		= 0x00;
3640			memcpy(data.dev_class, info->dev_class, 3);
3641			data.clock_offset	= info->clock_offset;
3642			data.rssi		= info->rssi;
3643			data.ssp_mode		= 0x00;
3644
3645			flags = hci_inquiry_cache_update(hdev, &data, false);
3646
3647			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3648					  info->dev_class, info->rssi,
3649					  flags, NULL, 0, NULL, 0);
3650		}
3651	}
3652
3653	hci_dev_unlock(hdev);
3654}
3655
/* Handle HCI Read Remote Extended Features Complete event: store the
 * reported feature page and, for page 0x01 (the remote host features),
 * synchronize the connection's SSP and Secure Connections flags. While
 * the connection is still in BT_CONFIG this also drives the remote
 * name request / mgmt connected notification sequence.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* NOTE(review): the feature page is copied even when ev->status
	 * reports a failure — confirm this is intentional.
	 */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remainder only applies while connection setup is still
	 * being completed.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required the connection can
	 * transition to BT_CONNECTED right away.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3719
/* Handle HCI Synchronous Connection Complete event: finalize a SCO or
 * eSCO connection on success, retry with a recomputed packet type for
 * a known set of negotiation failures, and otherwise tear the
 * connection down.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For these negotiation failures an outgoing setup is
		 * retried once with a packet type recomputed from the
		 * controller's eSCO capabilities before giving up.
		 *
		 * NOTE(review): conn->link is dereferenced without a NULL
		 * check — confirm an outgoing sync connection always has
		 * a parent ACL link at this point.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3786
3787static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3788{
3789	size_t parsed = 0;
3790
3791	while (parsed < eir_len) {
3792		u8 field_len = eir[0];
3793
3794		if (field_len == 0)
3795			return parsed;
3796
3797		parsed += field_len + 1;
3798		eir += field_len + 1;
3799	}
3800
3801	return eir_len;
3802}
3803
3804static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3805					    struct sk_buff *skb)
3806{
3807	struct inquiry_data data;
3808	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3809	int num_rsp = *((__u8 *) skb->data);
3810	size_t eir_len;
3811
3812	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3813
3814	if (!num_rsp)
3815		return;
3816
3817	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3818		return;
3819
3820	hci_dev_lock(hdev);
3821
3822	for (; num_rsp; num_rsp--, info++) {
3823		u32 flags;
3824		bool name_known;
3825
3826		bacpy(&data.bdaddr, &info->bdaddr);
3827		data.pscan_rep_mode	= info->pscan_rep_mode;
3828		data.pscan_period_mode	= info->pscan_period_mode;
3829		data.pscan_mode		= 0x00;
3830		memcpy(data.dev_class, info->dev_class, 3);
3831		data.clock_offset	= info->clock_offset;
3832		data.rssi		= info->rssi;
3833		data.ssp_mode		= 0x01;
3834
3835		if (hci_dev_test_flag(hdev, HCI_MGMT))
3836			name_known = eir_get_data(info->data,
3837						  sizeof(info->data),
3838						  EIR_NAME_COMPLETE, NULL);
3839		else
3840			name_known = true;
3841
3842		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3843
3844		eir_len = eir_get_length(info->data, sizeof(info->data));
3845
3846		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3847				  info->dev_class, info->rssi,
3848				  flags, info->data, eir_len, NULL, 0);
3849	}
3850
3851	hci_dev_unlock(hdev);
3852}
3853
/* Handle HCI Encryption Key Refresh Complete event. Only acted upon
 * for LE links; BR/EDR connections are handled via the auth_complete
 * event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* A successful refresh raises the security level to the one
	 * that was pending.
	 */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A refresh failure on an established connection results in an
	 * authentication-failure disconnect.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3903
3904static u8 hci_get_auth_req(struct hci_conn *conn)
3905{
3906	/* If remote requests no-bonding follow that lead */
3907	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3908	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3909		return conn->remote_auth | (conn->auth_type & 0x01);
3910
3911	/* If both remote and local have enough IO capabilities, require
3912	 * MITM protection
3913	 */
3914	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3915	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3916		return conn->remote_auth | 0x01;
3917
3918	/* No MITM protection possible so ignore remote requirement */
3919	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3920}
3921
3922static u8 bredr_oob_data_present(struct hci_conn *conn)
3923{
3924	struct hci_dev *hdev = conn->hdev;
3925	struct oob_data *data;
3926
3927	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3928	if (!data)
3929		return 0x00;
3930
3931	if (bredr_sc_enabled(hdev)) {
3932		/* When Secure Connections is enabled, then just
3933		 * return the present value stored with the OOB
3934		 * data. The stored value contains the right present
3935		 * information. However it can only be trusted when
3936		 * not in Secure Connection Only mode.
3937		 */
3938		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3939			return data->present;
3940
3941		/* When Secure Connections Only mode is enabled, then
3942		 * the P-256 values are required. If they are not
3943		 * available, then do not declare that OOB data is
3944		 * present.
3945		 */
3946		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3947		    !memcmp(data->hash256, ZERO_KEY, 16))
3948			return 0x00;
3949
3950		return 0x02;
3951	}
3952
3953	/* When Secure Connections is not enabled or actually
3954	 * not supported by the hardware, then check that if
3955	 * P-192 data values are present.
3956	 */
3957	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3958	    !memcmp(data->hash192, ZERO_KEY, 16))
3959		return 0x00;
3960
3961	return 0x01;
3962}
3963
/* Handle HCI IO Capability Request event: reply with our IO
 * capability and authentication requirements when pairing is allowed,
 * otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive while the pairing exchange runs. */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing is not permitted; reject the request. */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4032
4033static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4034{
4035	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4036	struct hci_conn *conn;
4037
4038	BT_DBG("%s", hdev->name);
4039
4040	hci_dev_lock(hdev);
4041
4042	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4043	if (!conn)
4044		goto unlock;
4045
4046	conn->remote_cap = ev->capability;
4047	conn->remote_auth = ev->authentication;
4048
4049unlock:
4050	hci_dev_unlock(hdev);
4051}
4052
/* Handle HCI User Confirmation Request event: decide between
 * auto-accepting the numeric comparison, rejecting it (when required
 * MITM protection is impossible), or forwarding the request to user
 * space via the management interface.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* MITM requirement of each side (bit 0 of the auth req). */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional delay gives user space a chance to cancel. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4127
4128static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4129					 struct sk_buff *skb)
4130{
4131	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4132
4133	BT_DBG("%s", hdev->name);
4134
4135	if (hci_dev_test_flag(hdev, HCI_MGMT))
4136		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4137}
4138
4139static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4140					struct sk_buff *skb)
4141{
4142	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4143	struct hci_conn *conn;
4144
4145	BT_DBG("%s", hdev->name);
4146
4147	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4148	if (!conn)
4149		return;
4150
4151	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4152	conn->passkey_entered = 0;
4153
4154	if (hci_dev_test_flag(hdev, HCI_MGMT))
4155		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4156					 conn->dst_type, conn->passkey_notify,
4157					 conn->passkey_entered);
4158}
4159
4160static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4161{
4162	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4163	struct hci_conn *conn;
4164
4165	BT_DBG("%s", hdev->name);
4166
4167	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4168	if (!conn)
4169		return;
4170
4171	switch (ev->type) {
4172	case HCI_KEYPRESS_STARTED:
4173		conn->passkey_entered = 0;
4174		return;
4175
4176	case HCI_KEYPRESS_ENTERED:
4177		conn->passkey_entered++;
4178		break;
4179
4180	case HCI_KEYPRESS_ERASED:
4181		conn->passkey_entered--;
4182		break;
4183
4184	case HCI_KEYPRESS_CLEARED:
4185		conn->passkey_entered = 0;
4186		break;
4187
4188	case HCI_KEYPRESS_COMPLETED:
4189		return;
4190	}
4191
4192	if (hci_dev_test_flag(hdev, HCI_MGMT))
4193		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4194					 conn->dst_type, conn->passkey_notify,
4195					 conn->passkey_entered);
4196}
4197
4198static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4199					 struct sk_buff *skb)
4200{
4201	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4202	struct hci_conn *conn;
4203
4204	BT_DBG("%s", hdev->name);
4205
4206	hci_dev_lock(hdev);
4207
4208	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4209	if (!conn)
4210		goto unlock;
4211
4212	/* Reset the authentication requirement to unknown */
4213	conn->remote_auth = 0xff;
4214
4215	/* To avoid duplicate auth_failed events to user space we check
4216	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4217	 * initiated the authentication. A traditional auth_complete
4218	 * event gets always produced as initiator and is also mapped to
4219	 * the mgmt_auth_failed event */
4220	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4221		mgmt_auth_failed(conn, ev->status);
4222
4223	hci_conn_drop(conn);
4224
4225unlock:
4226	hci_dev_unlock(hdev);
4227}
4228
4229static void hci_remote_host_features_evt(struct hci_dev *hdev,
4230					 struct sk_buff *skb)
4231{
4232	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4233	struct inquiry_entry *ie;
4234	struct hci_conn *conn;
4235
4236	BT_DBG("%s", hdev->name);
4237
4238	hci_dev_lock(hdev);
4239
4240	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4241	if (conn)
4242		memcpy(conn->features[1], ev->features, 8);
4243
4244	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4245	if (ie)
4246		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4247
4248	hci_dev_unlock(hdev);
4249}
4250
4251static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4252					    struct sk_buff *skb)
4253{
4254	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4255	struct oob_data *data;
4256
4257	BT_DBG("%s", hdev->name);
4258
4259	hci_dev_lock(hdev);
4260
4261	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4262		goto unlock;
4263
4264	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4265	if (!data) {
4266		struct hci_cp_remote_oob_data_neg_reply cp;
4267
4268		bacpy(&cp.bdaddr, &ev->bdaddr);
4269		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4270			     sizeof(cp), &cp);
4271		goto unlock;
4272	}
4273
4274	if (bredr_sc_enabled(hdev)) {
4275		struct hci_cp_remote_oob_ext_data_reply cp;
4276
4277		bacpy(&cp.bdaddr, &ev->bdaddr);
4278		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4279			memset(cp.hash192, 0, sizeof(cp.hash192));
4280			memset(cp.rand192, 0, sizeof(cp.rand192));
4281		} else {
4282			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4283			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4284		}
4285		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4286		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4287
4288		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4289			     sizeof(cp), &cp);
4290	} else {
4291		struct hci_cp_remote_oob_data_reply cp;
4292
4293		bacpy(&cp.bdaddr, &ev->bdaddr);
4294		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4295		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4296
4297		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4298			     sizeof(cp), &cp);
4299	}
4300
4301unlock:
4302	hci_dev_unlock(hdev);
4303}
4304
4305#if IS_ENABLED(CONFIG_BT_HS)
4306static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4307{
4308	struct hci_ev_channel_selected *ev = (void *)skb->data;
4309	struct hci_conn *hcon;
4310
4311	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4312
4313	skb_pull(skb, sizeof(*ev));
4314
4315	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4316	if (!hcon)
4317		return;
4318
4319	amp_read_loc_assoc_final_data(hdev, hcon);
4320}
4321
4322static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4323				      struct sk_buff *skb)
4324{
4325	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4326	struct hci_conn *hcon, *bredr_hcon;
4327
4328	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4329	       ev->status);
4330
4331	hci_dev_lock(hdev);
4332
4333	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4334	if (!hcon) {
4335		hci_dev_unlock(hdev);
4336		return;
4337	}
4338
4339	if (ev->status) {
4340		hci_conn_del(hcon);
4341		hci_dev_unlock(hdev);
4342		return;
4343	}
4344
4345	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4346
4347	hcon->state = BT_CONNECTED;
4348	bacpy(&hcon->dst, &bredr_hcon->dst);
4349
4350	hci_conn_hold(hcon);
4351	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4352	hci_conn_drop(hcon);
4353
4354	hci_debugfs_create_conn(hcon);
4355	hci_conn_add_sysfs(hcon);
4356
4357	amp_physical_cfm(bredr_hcon, hcon);
4358
4359	hci_dev_unlock(hdev);
4360}
4361
/* Handle HCI Logical Link Complete event (AMP): create an hci_chan for
 * the new logical link and confirm it towards the pending L2CAP
 * channel of the AMP manager.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	/* NOTE(review): unlike most event handlers, hdev->lock is not
	 * taken here — confirm the lookups and hci_chan_create() are
	 * safe without it in this context.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Confirm the logical link to L2CAP and take a reference
		 * on the physical link for the channel's lifetime.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4399
4400static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4401					     struct sk_buff *skb)
4402{
4403	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4404	struct hci_chan *hchan;
4405
4406	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4407	       le16_to_cpu(ev->handle), ev->status);
4408
4409	if (ev->status)
4410		return;
4411
4412	hci_dev_lock(hdev);
4413
4414	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4415	if (!hchan)
4416		goto unlock;
4417
4418	amp_destroy_logical_link(hchan, ev->reason);
4419
4420unlock:
4421	hci_dev_unlock(hdev);
4422}
4423
4424static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4425					     struct sk_buff *skb)
4426{
4427	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4428	struct hci_conn *hcon;
4429
4430	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4431
4432	if (ev->status)
4433		return;
4434
4435	hci_dev_lock(hdev);
4436
4437	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4438	if (hcon) {
4439		hcon->state = BT_CLOSED;
4440		hci_conn_del(hcon);
4441	}
4442
4443	hci_dev_unlock(hdev);
4444}
4445#endif
4446
/* Handle HCI LE Connection Complete event: create or finalize the LE
 * connection object, resolve the peer's identity address, apply the
 * device block list, notify the management interface and kick off the
 * remote feature exchange where applicable.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A tracked connection attempt completed; stop its
		 * connection timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	/* The pending connection attempt for this device succeeded, so
	 * remove it from the action list and release its reference.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4608
4609static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4610					    struct sk_buff *skb)
4611{
4612	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4613	struct hci_conn *conn;
4614
4615	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4616
4617	if (ev->status)
4618		return;
4619
4620	hci_dev_lock(hdev);
4621
4622	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4623	if (conn) {
4624		conn->le_conn_interval = le16_to_cpu(ev->interval);
4625		conn->le_conn_latency = le16_to_cpu(ev->latency);
4626		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4627	}
4628
4629	hci_dev_unlock(hdev);
4630}
4631
/* Decide whether an advertising report should trigger an outgoing LE
 * connection to the advertiser, and start the connection if so.
 *
 * Returns the new hci_conn on success, or NULL when no connection is
 * wanted or the attempt could not be started.
 *
 * This function requires the caller holds hdev->lock.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* For an explicit connect request the auto-connect policy does
	 * not apply: a higher layer already asked for this connection.
	 */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4718
4719static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4720			       u8 bdaddr_type, bdaddr_t *direct_addr,
4721			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4722{
4723	struct discovery_state *d = &hdev->discovery;
4724	struct smp_irk *irk;
4725	struct hci_conn *conn;
4726	bool match;
4727	u32 flags;
4728	u8 *ptr, real_len;
 
 
 
 
 
 
 
 
 
 
 
 
 
4729
4730	/* Find the end of the data in case the report contains padded zero
4731	 * bytes at the end causing an invalid length value.
4732	 *
4733	 * When data is NULL, len is 0 so there is no need for extra ptr
4734	 * check as 'ptr < data + 0' is already false in such case.
4735	 */
4736	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4737		if (ptr + 1 + *ptr > data + len)
4738			break;
4739	}
4740
4741	real_len = ptr - data;
4742
4743	/* Adjust for actual length */
4744	if (len != real_len) {
4745		BT_ERR_RATELIMITED("%s advertising data length corrected",
4746				   hdev->name);
4747		len = real_len;
4748	}
4749
4750	/* If the direct address is present, then this report is from
4751	 * a LE Direct Advertising Report event. In that case it is
4752	 * important to see if the address is matching the local
4753	 * controller address.
4754	 */
4755	if (direct_addr) {
4756		/* Only resolvable random addresses are valid for these
4757		 * kind of reports and others can be ignored.
4758		 */
4759		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4760			return;
4761
4762		/* If the controller is not using resolvable random
4763		 * addresses, then this report can be ignored.
4764		 */
4765		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4766			return;
4767
4768		/* If the local IRK of the controller does not match
4769		 * with the resolvable random address provided, then
4770		 * this report can be ignored.
4771		 */
4772		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4773			return;
4774	}
4775
4776	/* Check if we need to convert to identity address */
4777	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4778	if (irk) {
4779		bdaddr = &irk->bdaddr;
4780		bdaddr_type = irk->addr_type;
4781	}
4782
4783	/* Check if we have been requested to connect to this device */
4784	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4785	if (conn && type == LE_ADV_IND) {
4786		/* Store report for later inclusion by
4787		 * mgmt_device_connected
4788		 */
4789		memcpy(conn->le_adv_data, data, len);
4790		conn->le_adv_data_len = len;
4791	}
4792
4793	/* Passive scanning shouldn't trigger any device found events,
4794	 * except for devices marked as CONN_REPORT for which we do send
4795	 * device found events.
4796	 */
4797	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4798		if (type == LE_ADV_DIRECT_IND)
4799			return;
4800
4801		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4802					       bdaddr, bdaddr_type))
4803			return;
4804
4805		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4806			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4807		else
4808			flags = 0;
4809		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4810				  rssi, flags, data, len, NULL, 0);
4811		return;
4812	}
4813
4814	/* When receiving non-connectable or scannable undirected
4815	 * advertising reports, this means that the remote device is
4816	 * not connectable and then clearly indicate this in the
4817	 * device found event.
4818	 *
4819	 * When receiving a scan response, then there is no way to
4820	 * know if the remote device is connectable or not. However
4821	 * since scan responses are merged with a previously seen
4822	 * advertising report, the flags field from that report
4823	 * will be used.
4824	 *
4825	 * In the really unlikely case that a controller get confused
4826	 * and just sends a scan response event, then it is marked as
4827	 * not connectable as well.
4828	 */
4829	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4830	    type == LE_ADV_SCAN_RSP)
4831		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4832	else
4833		flags = 0;
4834
4835	/* If there's nothing pending either store the data from this
4836	 * event or send an immediate device found event if the data
4837	 * should not be stored for later.
4838	 */
4839	if (!has_pending_adv_report(hdev)) {
4840		/* If the report will trigger a SCAN_REQ store it for
4841		 * later merging.
4842		 */
4843		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4844			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4845						 rssi, flags, data, len);
4846			return;
4847		}
4848
4849		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4850				  rssi, flags, data, len, NULL, 0);
4851		return;
4852	}
4853
4854	/* Check if the pending report is for the same device as the new one */
4855	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4856		 bdaddr_type == d->last_adv_addr_type);
4857
4858	/* If the pending data doesn't match this report or this isn't a
4859	 * scan response (e.g. we got a duplicate ADV_IND) then force
4860	 * sending of the pending data.
4861	 */
4862	if (type != LE_ADV_SCAN_RSP || !match) {
4863		/* Send out whatever is in the cache, but skip duplicates */
4864		if (!match)
4865			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4866					  d->last_adv_addr_type, NULL,
4867					  d->last_adv_rssi, d->last_adv_flags,
4868					  d->last_adv_data,
4869					  d->last_adv_data_len, NULL, 0);
4870
4871		/* If the new report will trigger a SCAN_REQ store it for
4872		 * later merging.
4873		 */
4874		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4875			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4876						 rssi, flags, data, len);
4877			return;
4878		}
4879
4880		/* The advertising reports cannot be merged, so clear
4881		 * the pending report and send out a device found event.
4882		 */
4883		clear_pending_adv_report(hdev);
4884		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4885				  rssi, flags, data, len, NULL, 0);
4886		return;
4887	}
4888
4889	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4890	 * the new event is a SCAN_RSP. We can therefore proceed with
4891	 * sending a merged device found event.
4892	 */
4893	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4894			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4895			  d->last_adv_data, d->last_adv_data_len, data, len);
4896	clear_pending_adv_report(hdev);
4897}
4898
4899static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4900{
4901	u8 num_reports = skb->data[0];
4902	void *ptr = &skb->data[1];
4903
4904	hci_dev_lock(hdev);
4905
4906	while (num_reports--) {
4907		struct hci_ev_le_advertising_info *ev = ptr;
4908		s8 rssi;
4909
4910		rssi = ev->data[ev->length];
4911		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4912				   ev->bdaddr_type, NULL, 0, rssi,
4913				   ev->data, ev->length);
4914
4915		ptr += sizeof(*ev) + ev->length + 1;
4916	}
4917
4918	hci_dev_unlock(hdev);
4919}
4920
/* LE Read Remote Features Complete event: store the remote feature
 * mask and, if the connection was waiting in BT_CONFIG for this
 * procedure, transition it to BT_CONNECTED and notify the upper
 * layers via hci_connect_cfm().
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			/* Drop the reference taken when the read remote
			 * features command was issued.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
4962
/* LE Long Term Key Request event: the controller asks the host for the
 * LTK needed to start (or resume) encryption on a connection. Look the
 * key up in the SMP key store and reply with it, or send a negative
 * reply when no usable key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the stored key and zero-pad it to the full 16 bytes in
	 * case a reduced-size key is stored.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5026
5027static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5028				      u8 reason)
5029{
5030	struct hci_cp_le_conn_param_req_neg_reply cp;
5031
5032	cp.handle = cpu_to_le16(handle);
5033	cp.reason = reason;
5034
5035	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5036		     &cp);
5037}
5038
5039static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5040					     struct sk_buff *skb)
5041{
5042	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5043	struct hci_cp_le_conn_param_req_reply cp;
5044	struct hci_conn *hcon;
5045	u16 handle, min, max, latency, timeout;
5046
5047	handle = le16_to_cpu(ev->handle);
5048	min = le16_to_cpu(ev->interval_min);
5049	max = le16_to_cpu(ev->interval_max);
5050	latency = le16_to_cpu(ev->latency);
5051	timeout = le16_to_cpu(ev->timeout);
5052
5053	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5054	if (!hcon || hcon->state != BT_CONNECTED)
5055		return send_conn_param_neg_reply(hdev, handle,
5056						 HCI_ERROR_UNKNOWN_CONN_ID);
5057
5058	if (hci_check_conn_params(min, max, latency, timeout))
5059		return send_conn_param_neg_reply(hdev, handle,
5060						 HCI_ERROR_INVALID_LL_PARAMS);
5061
5062	if (hcon->role == HCI_ROLE_MASTER) {
5063		struct hci_conn_params *params;
5064		u8 store_hint;
5065
5066		hci_dev_lock(hdev);
5067
5068		params = hci_conn_params_lookup(hdev, &hcon->dst,
5069						hcon->dst_type);
5070		if (params) {
5071			params->conn_min_interval = min;
5072			params->conn_max_interval = max;
5073			params->conn_latency = latency;
5074			params->supervision_timeout = timeout;
5075			store_hint = 0x01;
5076		} else{
5077			store_hint = 0x00;
5078		}
5079
5080		hci_dev_unlock(hdev);
5081
5082		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5083				    store_hint, min, max, latency, timeout);
5084	}
5085
5086	cp.handle = ev->handle;
5087	cp.interval_min = ev->interval_min;
5088	cp.interval_max = ev->interval_max;
5089	cp.latency = ev->latency;
5090	cp.timeout = ev->timeout;
5091	cp.min_ce_len = 0;
5092	cp.max_ce_len = 0;
5093
5094	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5095}
5096
5097static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5098					 struct sk_buff *skb)
5099{
5100	u8 num_reports = skb->data[0];
5101	void *ptr = &skb->data[1];
5102
5103	hci_dev_lock(hdev);
5104
5105	while (num_reports--) {
5106		struct hci_ev_le_direct_adv_info *ev = ptr;
5107
5108		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5109				   ev->bdaddr_type, &ev->direct_addr,
5110				   ev->direct_addr_type, ev->rssi, NULL, 0);
5111
5112		ptr += sizeof(*ev);
5113	}
5114
5115	hci_dev_unlock(hdev);
5116}
5117
5118static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5119{
5120	struct hci_ev_le_meta *le_ev = (void *) skb->data;
5121
5122	skb_pull(skb, sizeof(*le_ev));
5123
5124	switch (le_ev->subevent) {
5125	case HCI_EV_LE_CONN_COMPLETE:
5126		hci_le_conn_complete_evt(hdev, skb);
5127		break;
5128
5129	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5130		hci_le_conn_update_complete_evt(hdev, skb);
5131		break;
5132
5133	case HCI_EV_LE_ADVERTISING_REPORT:
5134		hci_le_adv_report_evt(hdev, skb);
5135		break;
5136
5137	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5138		hci_le_remote_feat_complete_evt(hdev, skb);
5139		break;
5140
5141	case HCI_EV_LE_LTK_REQ:
5142		hci_le_ltk_request_evt(hdev, skb);
5143		break;
5144
5145	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5146		hci_le_remote_conn_param_req_evt(hdev, skb);
5147		break;
5148
5149	case HCI_EV_LE_DIRECT_ADV_REPORT:
5150		hci_le_direct_adv_report_evt(hdev, skb);
5151		break;
5152
5153	default:
5154		break;
5155	}
5156}
5157
/* Check whether @skb is the event that completes the command @opcode.
 *
 * When @event is non-zero, the request explicitly waits for that event
 * type and a simple event-code match suffices. Otherwise the event
 * must be a Command Complete whose embedded opcode matches @opcode.
 *
 * On success the event (and command complete) headers have been pulled
 * from @skb, leaving only the return parameters. Returns true when the
 * skb completes the command, false otherwise.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
5202
/* Main HCI event entry point: dispatch a received event to its
 * handler and complete any pending HCI request that was waiting for
 * this event. Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the pending command explicitly waits for this event type,
	 * look up its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Dispatch to the per-event handler; unknown events are only
	 * logged.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Complete the pending request, handing over the pristine copy
	 * of the event when the callback wants the skb. The copy is
	 * dropped when it does not actually complete the command.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI event handling. */
  26
  27#include <asm/unaligned.h>
  28
  29#include <net/bluetooth/bluetooth.h>
  30#include <net/bluetooth/hci_core.h>
  31#include <net/bluetooth/mgmt.h>
  32
  33#include "hci_request.h"
  34#include "hci_debugfs.h"
  35#include "a2mp.h"
  36#include "amp.h"
  37#include "smp.h"
  38
  39#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
  40		 "\x00\x00\x00\x00\x00\x00\x00\x00"
  41
  42/* Handle HCI Event packets */
  43
/* Inquiry Cancel command complete: on success clear the inquiry state,
 * wake up anyone waiting for the inquiry to end, update the discovery
 * state and resume deferred connection attempts.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
  68
  69static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
  70{
  71	__u8 status = *((__u8 *) skb->data);
  72
  73	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  74
  75	if (status)
  76		return;
  77
  78	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
  79}
  80
  81static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
  82{
  83	__u8 status = *((__u8 *) skb->data);
  84
  85	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  86
  87	if (status)
  88		return;
  89
  90	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
  91
  92	hci_conn_check_pending(hdev);
  93}
  94
  95static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
  96					  struct sk_buff *skb)
  97{
  98	BT_DBG("%s", hdev->name);
  99}
 100
 101static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
 102{
 103	struct hci_rp_role_discovery *rp = (void *) skb->data;
 104	struct hci_conn *conn;
 105
 106	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 107
 108	if (rp->status)
 109		return;
 110
 111	hci_dev_lock(hdev);
 112
 113	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 114	if (conn)
 115		conn->role = rp->role;
 116
 117	hci_dev_unlock(hdev);
 118}
 119
 120static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 121{
 122	struct hci_rp_read_link_policy *rp = (void *) skb->data;
 123	struct hci_conn *conn;
 124
 125	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 126
 127	if (rp->status)
 128		return;
 129
 130	hci_dev_lock(hdev);
 131
 132	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 133	if (conn)
 134		conn->link_policy = __le16_to_cpu(rp->policy);
 135
 136	hci_dev_unlock(hdev);
 137}
 138
 139static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 140{
 141	struct hci_rp_write_link_policy *rp = (void *) skb->data;
 142	struct hci_conn *conn;
 143	void *sent;
 144
 145	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 146
 147	if (rp->status)
 148		return;
 149
 150	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
 151	if (!sent)
 152		return;
 153
 154	hci_dev_lock(hdev);
 155
 156	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 157	if (conn)
 158		conn->link_policy = get_unaligned_le16(sent + 2);
 159
 160	hci_dev_unlock(hdev);
 161}
 162
 163static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
 164					struct sk_buff *skb)
 165{
 166	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
 167
 168	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 169
 170	if (rp->status)
 171		return;
 172
 173	hdev->link_policy = __le16_to_cpu(rp->policy);
 174}
 175
 176static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
 177					 struct sk_buff *skb)
 178{
 179	__u8 status = *((__u8 *) skb->data);
 180	void *sent;
 181
 182	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 183
 184	if (status)
 185		return;
 186
 187	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
 188	if (!sent)
 189		return;
 190
 191	hdev->link_policy = get_unaligned_le16(sent);
 192}
 193
/* Reset command complete: on success bring all cached controller state
 * back to its post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* HCI_RESET is cleared even on failure so a new reset attempt
	 * is not blocked.
	 */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Drop cached advertising and scan response data */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's LE white list is empty after a reset */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
 225
 226static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
 227					struct sk_buff *skb)
 228{
 229	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
 230	struct hci_cp_read_stored_link_key *sent;
 231
 232	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 233
 234	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
 235	if (!sent)
 236		return;
 237
 238	if (!rp->status && sent->read_all == 0x01) {
 239		hdev->stored_max_keys = rp->max_keys;
 240		hdev->stored_num_keys = rp->num_keys;
 241	}
 242}
 243
 244static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
 245					  struct sk_buff *skb)
 246{
 247	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
 248
 249	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 250
 251	if (rp->status)
 252		return;
 253
 254	if (rp->num_keys <= hdev->stored_num_keys)
 255		hdev->stored_num_keys -= rp->num_keys;
 256	else
 257		hdev->stored_num_keys = 0;
 258}
 259
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME: update the cached
 * device name and/or notify the management interface.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* With mgmt in use it is told about the outcome even on failure;
	 * otherwise the name is only cached when the command succeeded.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
 280
 281static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 282{
 283	struct hci_rp_read_local_name *rp = (void *) skb->data;
 284
 285	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 286
 287	if (rp->status)
 288		return;
 289
 290	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 291	    hci_dev_test_flag(hdev, HCI_CONFIG))
 292		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 293}
 294
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE: track the
 * controller-wide authentication setting and inform mgmt.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Only sync the HCI_AUTH flag with the sent parameter on success */
	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is notified of the outcome regardless of success or failure */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
 322
 323static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 324{
 325	__u8 status = *((__u8 *) skb->data);
 326	__u8 param;
 327	void *sent;
 328
 329	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 330
 331	if (status)
 332		return;
 333
 334	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
 335	if (!sent)
 336		return;
 337
 338	param = *((__u8 *) sent);
 339
 340	if (param)
 341		set_bit(HCI_ENCRYPT, &hdev->flags);
 342	else
 343		clear_bit(HCI_ENCRYPT, &hdev->flags);
 344}
 345
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE: sync the
 * HCI_ISCAN/HCI_PSCAN flags with the scan mode that was requested.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	/* On failure the discoverable timeout is reset so a stale timer
	 * does not keep running for a mode that was never entered.
	 */
	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
 380
 381static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
 382{
 383	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
 384
 385	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 386
 387	if (rp->status)
 388		return;
 389
 390	memcpy(hdev->dev_class, rp->dev_class, 3);
 391
 392	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
 393	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
 394}
 395
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: cache the class
 * we set on success and notify mgmt of the outcome either way.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Cache only on success */
	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	/* mgmt is informed even when the command failed */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
 417
 418static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
 419{
 420	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
 421	__u16 setting;
 422
 423	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 424
 425	if (rp->status)
 426		return;
 427
 428	setting = __le16_to_cpu(rp->voice_setting);
 429
 430	if (hdev->voice_setting == setting)
 431		return;
 432
 433	hdev->voice_setting = setting;
 434
 435	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
 436
 437	if (hdev->notify)
 438		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 439}
 440
 441static void hci_cc_write_voice_setting(struct hci_dev *hdev,
 442				       struct sk_buff *skb)
 443{
 444	__u8 status = *((__u8 *) skb->data);
 445	__u16 setting;
 446	void *sent;
 447
 448	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 449
 450	if (status)
 451		return;
 452
 453	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
 454	if (!sent)
 455		return;
 456
 457	setting = get_unaligned_le16(sent);
 458
 459	if (hdev->voice_setting == setting)
 460		return;
 461
 462	hdev->voice_setting = setting;
 463
 464	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
 465
 466	if (hdev->notify)
 467		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 468}
 469
 470static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
 471					  struct sk_buff *skb)
 472{
 473	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
 474
 475	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 476
 477	if (rp->status)
 478		return;
 479
 480	hdev->num_iac = rp->num_iac;
 481
 482	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
 483}
 484
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE: keep the host SSP
 * feature bit and HCI_SSP_ENABLED flag in sync with the requested mode.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Update the host feature mask only on success */
	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	/* mgmt learns of the outcome even on failure; otherwise the
	 * HCI_SSP_ENABLED flag is updated directly on success.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
 516
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT: keep the host
 * Secure Connections feature bit and HCI_SC_ENABLED flag in sync.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Update the host feature mask only on success */
	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	/* When mgmt is not in use, the flag is updated here on success;
	 * unlike SSP above, mgmt is not notified of the outcome.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
 546
 547static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 548{
 549	struct hci_rp_read_local_version *rp = (void *) skb->data;
 550
 551	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 552
 553	if (rp->status)
 554		return;
 555
 556	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 557	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 558		hdev->hci_ver = rp->hci_ver;
 559		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
 560		hdev->lmp_ver = rp->lmp_ver;
 561		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
 562		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 563	}
 564}
 565
 566static void hci_cc_read_local_commands(struct hci_dev *hdev,
 567				       struct sk_buff *skb)
 568{
 569	struct hci_rp_read_local_commands *rp = (void *) skb->data;
 570
 571	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 572
 573	if (rp->status)
 574		return;
 575
 576	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 577	    hci_dev_test_flag(hdev, HCI_CONFIG))
 578		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 579}
 580
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache page 0 of
 * the LMP feature mask and derive the supported ACL/SCO/eSCO packet types.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 packets */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
 630
 631static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
 632					   struct sk_buff *skb)
 633{
 634	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
 635
 636	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 637
 638	if (rp->status)
 639		return;
 640
 641	if (hdev->max_page < rp->max_page)
 642		hdev->max_page = rp->max_page;
 643
 644	if (rp->page < HCI_MAX_PAGES)
 645		memcpy(hdev->features[rp->page], rp->features, 8);
 646}
 647
 648static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
 649					  struct sk_buff *skb)
 650{
 651	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
 652
 653	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 654
 655	if (rp->status)
 656		return;
 657
 658	hdev->flow_ctl_mode = rp->mode;
 659}
 660
 661static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 662{
 663	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
 664
 665	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 666
 667	if (rp->status)
 668		return;
 669
 670	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
 671	hdev->sco_mtu  = rp->sco_mtu;
 672	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
 673	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
 674
 675	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
 676		hdev->sco_mtu  = 64;
 677		hdev->sco_pkts = 8;
 678	}
 679
 680	hdev->acl_cnt = hdev->acl_pkts;
 681	hdev->sco_cnt = hdev->sco_pkts;
 682
 683	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
 684	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
 685}
 686
 687static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
 688{
 689	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
 690
 691	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 692
 693	if (rp->status)
 694		return;
 695
 696	if (test_bit(HCI_INIT, &hdev->flags))
 697		bacpy(&hdev->bdaddr, &rp->bdaddr);
 698
 699	if (hci_dev_test_flag(hdev, HCI_SETUP))
 700		bacpy(&hdev->setup_addr, &rp->bdaddr);
 701}
 702
 703static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
 704					   struct sk_buff *skb)
 705{
 706	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
 707
 708	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 709
 710	if (rp->status)
 711		return;
 712
 713	if (test_bit(HCI_INIT, &hdev->flags)) {
 714		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
 715		hdev->page_scan_window = __le16_to_cpu(rp->window);
 716	}
 717}
 718
 719static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
 720					    struct sk_buff *skb)
 721{
 722	u8 status = *((u8 *) skb->data);
 723	struct hci_cp_write_page_scan_activity *sent;
 724
 725	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 726
 727	if (status)
 728		return;
 729
 730	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
 731	if (!sent)
 732		return;
 733
 734	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
 735	hdev->page_scan_window = __le16_to_cpu(sent->window);
 736}
 737
 738static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
 739					   struct sk_buff *skb)
 740{
 741	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
 742
 743	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 744
 745	if (rp->status)
 746		return;
 747
 748	if (test_bit(HCI_INIT, &hdev->flags))
 749		hdev->page_scan_type = rp->type;
 750}
 751
 752static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
 753					struct sk_buff *skb)
 754{
 755	u8 status = *((u8 *) skb->data);
 756	u8 *type;
 757
 758	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 759
 760	if (status)
 761		return;
 762
 763	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
 764	if (type)
 765		hdev->page_scan_type = *type;
 766}
 767
 768static void hci_cc_read_data_block_size(struct hci_dev *hdev,
 769					struct sk_buff *skb)
 770{
 771	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
 772
 773	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 774
 775	if (rp->status)
 776		return;
 777
 778	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
 779	hdev->block_len = __le16_to_cpu(rp->block_len);
 780	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
 781
 782	hdev->block_cnt = hdev->num_blocks;
 783
 784	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
 785	       hdev->block_cnt, hdev->block_len);
 786}
 787
/* Command Complete handler for HCI_OP_READ_CLOCK: store either the local
 * clock (which == 0x00) or the piconet clock of a specific connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching any reply field */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock; no connection involved */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise the clock belongs to the connection in rp->handle */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
 822
 823static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
 824				       struct sk_buff *skb)
 825{
 826	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
 827
 828	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 829
 830	if (rp->status)
 831		return;
 832
 833	hdev->amp_status = rp->amp_status;
 834	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
 835	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
 836	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
 837	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
 838	hdev->amp_type = rp->amp_type;
 839	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
 840	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
 841	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
 842	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
 843}
 844
 845static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
 846					 struct sk_buff *skb)
 847{
 848	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
 849
 850	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 851
 852	if (rp->status)
 853		return;
 854
 855	hdev->inq_tx_power = rp->tx_power;
 856}
 857
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY: tell mgmt about the
 * outcome and remember the PIN length on the matching connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	/* mgmt is notified even on failure */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	/* Record the PIN length on the ACL connection for key handling */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
 885
 886static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 887{
 888	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
 889
 890	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 891
 892	hci_dev_lock(hdev);
 893
 894	if (hci_dev_test_flag(hdev, HCI_MGMT))
 895		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
 896						 rp->status);
 897
 898	hci_dev_unlock(hdev);
 899}
 900
 901static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
 902				       struct sk_buff *skb)
 903{
 904	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
 905
 906	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 907
 908	if (rp->status)
 909		return;
 910
 911	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
 912	hdev->le_pkts = rp->le_max_pkt;
 913
 914	hdev->le_cnt = hdev->le_pkts;
 915
 916	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
 917}
 918
 919static void hci_cc_le_read_local_features(struct hci_dev *hdev,
 920					  struct sk_buff *skb)
 921{
 922	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
 923
 924	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 925
 926	if (rp->status)
 927		return;
 928
 929	memcpy(hdev->le_features, rp->features, 8);
 930}
 931
 932static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
 933					struct sk_buff *skb)
 934{
 935	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
 936
 937	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 938
 939	if (rp->status)
 940		return;
 941
 942	hdev->adv_tx_power = rp->tx_power;
 943}
 944
 945static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 946{
 947	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 948
 949	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 950
 951	hci_dev_lock(hdev);
 952
 953	if (hci_dev_test_flag(hdev, HCI_MGMT))
 954		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
 955						 rp->status);
 956
 957	hci_dev_unlock(hdev);
 958}
 959
 960static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 961					  struct sk_buff *skb)
 962{
 963	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 964
 965	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 966
 967	hci_dev_lock(hdev);
 968
 969	if (hci_dev_test_flag(hdev, HCI_MGMT))
 970		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
 971						     ACL_LINK, 0, rp->status);
 972
 973	hci_dev_unlock(hdev);
 974}
 975
 976static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
 977{
 978	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 979
 980	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 981
 982	hci_dev_lock(hdev);
 983
 984	if (hci_dev_test_flag(hdev, HCI_MGMT))
 985		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
 986						 0, rp->status);
 987
 988	hci_dev_unlock(hdev);
 989}
 990
 991static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
 992					  struct sk_buff *skb)
 993{
 994	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 995
 996	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 997
 998	hci_dev_lock(hdev);
 999
1000	if (hci_dev_test_flag(hdev, HCI_MGMT))
1001		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1002						     ACL_LINK, 0, rp->status);
1003
1004	hci_dev_unlock(hdev);
1005}
1006
1007static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1008				       struct sk_buff *skb)
1009{
1010	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1011
1012	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1013}
1014
1015static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1016					   struct sk_buff *skb)
1017{
1018	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1019
1020	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1021}
1022
1023static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1024{
1025	__u8 status = *((__u8 *) skb->data);
1026	bdaddr_t *sent;
1027
1028	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1029
1030	if (status)
1031		return;
1032
1033	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1034	if (!sent)
1035		return;
1036
1037	hci_dev_lock(hdev);
1038
1039	bacpy(&hdev->random_addr, sent);
1040
1041	hci_dev_unlock(hdev);
1042}
1043
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE: sync the
 * HCI_LE_ADV flag and arm the connect timeout for a pending peripheral
 * connection when advertising was just enabled.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1078
1079static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1080{
1081	struct hci_cp_le_set_scan_param *cp;
1082	__u8 status = *((__u8 *) skb->data);
1083
1084	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1085
1086	if (status)
1087		return;
1088
1089	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1090	if (!cp)
1091		return;
1092
1093	hci_dev_lock(hdev);
1094
1095	hdev->le_scan_type = cp->type;
1096
1097	hci_dev_unlock(hdev);
1098}
1099
1100static bool has_pending_adv_report(struct hci_dev *hdev)
1101{
1102	struct discovery_state *d = &hdev->discovery;
1103
1104	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1105}
1106
1107static void clear_pending_adv_report(struct hci_dev *hdev)
1108{
1109	struct discovery_state *d = &hdev->discovery;
1110
1111	bacpy(&d->last_adv_addr, BDADDR_ANY);
1112	d->last_adv_data_len = 0;
1113}
1114
1115static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1116				     u8 bdaddr_type, s8 rssi, u32 flags,
1117				     u8 *data, u8 len)
1118{
1119	struct discovery_state *d = &hdev->discovery;
1120
1121	bacpy(&d->last_adv_addr, bdaddr);
1122	d->last_adv_addr_type = bdaddr_type;
1123	d->last_adv_rssi = rssi;
1124	d->last_adv_flags = flags;
1125	memcpy(d->last_adv_data, data, len);
1126	d->last_adv_data_len = len;
1127}
1128
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: update the
 * HCI_LE_SCAN flag and, on scan stop, flush any deferred advertising
 * report and reconcile the discovery/advertising state.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans buffer adv reports awaiting the scan
		 * response; start with a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1197
1198static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1199					   struct sk_buff *skb)
1200{
1201	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1202
1203	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1204
1205	if (rp->status)
1206		return;
1207
1208	hdev->le_white_list_size = rp->size;
1209}
1210
1211static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1212				       struct sk_buff *skb)
1213{
1214	__u8 status = *((__u8 *) skb->data);
1215
1216	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1217
1218	if (status)
1219		return;
1220
1221	hci_bdaddr_list_clear(&hdev->le_white_list);
1222}
1223
1224static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1225					struct sk_buff *skb)
1226{
1227	struct hci_cp_le_add_to_white_list *sent;
1228	__u8 status = *((__u8 *) skb->data);
1229
1230	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1231
1232	if (status)
1233		return;
1234
1235	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1236	if (!sent)
1237		return;
1238
1239	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1240			   sent->bdaddr_type);
1241}
1242
1243static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1244					  struct sk_buff *skb)
1245{
1246	struct hci_cp_le_del_from_white_list *sent;
1247	__u8 status = *((__u8 *) skb->data);
1248
1249	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1250
1251	if (status)
1252		return;
1253
1254	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1255	if (!sent)
1256		return;
1257
1258	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1259			    sent->bdaddr_type);
1260}
1261
1262static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1263					    struct sk_buff *skb)
1264{
1265	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1266
1267	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1268
1269	if (rp->status)
1270		return;
1271
1272	memcpy(hdev->le_states, rp->le_states, 8);
1273}
1274
1275static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1276					struct sk_buff *skb)
1277{
1278	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1279
1280	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1281
1282	if (rp->status)
1283		return;
1284
1285	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1286	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1287}
1288
1289static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1290					 struct sk_buff *skb)
1291{
1292	struct hci_cp_le_write_def_data_len *sent;
1293	__u8 status = *((__u8 *) skb->data);
1294
1295	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1296
1297	if (status)
1298		return;
1299
1300	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1301	if (!sent)
1302		return;
1303
1304	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1305	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1306}
1307
1308static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1309					struct sk_buff *skb)
1310{
1311	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1312
1313	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1314
1315	if (rp->status)
1316		return;
1317
1318	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1319	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1320	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1321	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1322}
1323
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED: keep the
 * host LE feature bits and the HCI_LE_ENABLED/HCI_ADVERTISING flags in
 * sync with the values that were written.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE also invalidates any advertising state */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	/* Simultaneous LE and BR/EDR host support bit */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1357
1358static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1359{
1360	struct hci_cp_le_set_adv_param *cp;
1361	u8 status = *((u8 *) skb->data);
1362
1363	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364
1365	if (status)
1366		return;
1367
1368	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1369	if (!cp)
1370		return;
1371
1372	hci_dev_lock(hdev);
1373	hdev->adv_addr_type = cp->own_address_type;
1374	hci_dev_unlock(hdev);
1375}
1376
1377static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1378{
1379	struct hci_rp_read_rssi *rp = (void *) skb->data;
1380	struct hci_conn *conn;
1381
1382	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1383
1384	if (rp->status)
1385		return;
1386
1387	hci_dev_lock(hdev);
1388
1389	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1390	if (conn)
1391		conn->rssi = rp->rssi;
1392
1393	hci_dev_unlock(hdev);
1394}
1395
/* Command Complete handler for HCI_OP_READ_TX_POWER: store the reported
 * TX power on the matching connection, in the field selected by the
 * type byte of the original command.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	/* 0x00 = current TX power, 0x01 = maximum TX power */
	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1429
1430static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1431{
1432	u8 status = *((u8 *) skb->data);
1433	u8 *mode;
1434
1435	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1436
1437	if (status)
1438		return;
1439
1440	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1441	if (mode)
1442		hdev->ssp_debug_mode = *mode;
1443}
1444
1445static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1446{
1447	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1448
1449	if (status) {
1450		hci_conn_check_pending(hdev);
1451		return;
1452	}
1453
1454	set_bit(HCI_INQUIRY, &hdev->flags);
1455}
1456
/* Handle Command Status for HCI_Create_Connection.
 *
 * On failure, close and delete the pending connection object - except
 * that status 0x0c within the first two attempts parks the connection
 * in BT_CONNECT2 so it can be retried later (NOTE(review): 0x0c is
 * presumably "Command Disallowed" per the HCI error code table -
 * confirm against the spec).
 *
 * On success, make sure an hci_conn exists for the peer address so the
 * later Connection Complete event has an object to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Parameters of the command we sent; NULL if it wasn't ours */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1494
1495static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1496{
1497	struct hci_cp_add_sco *cp;
1498	struct hci_conn *acl, *sco;
1499	__u16 handle;
1500
1501	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502
1503	if (!status)
1504		return;
1505
1506	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1507	if (!cp)
1508		return;
1509
1510	handle = __le16_to_cpu(cp->handle);
1511
1512	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1513
1514	hci_dev_lock(hdev);
1515
1516	acl = hci_conn_hash_lookup_handle(hdev, handle);
1517	if (acl) {
1518		sco = acl->link;
1519		if (sco) {
1520			sco->state = BT_CLOSED;
1521
1522			hci_connect_cfm(sco, status);
1523			hci_conn_del(sco);
1524		}
1525	}
1526
1527	hci_dev_unlock(hdev);
1528}
1529
1530static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1531{
1532	struct hci_cp_auth_requested *cp;
1533	struct hci_conn *conn;
1534
1535	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1536
1537	if (!status)
1538		return;
1539
1540	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1541	if (!cp)
1542		return;
1543
1544	hci_dev_lock(hdev);
1545
1546	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1547	if (conn) {
1548		if (conn->state == BT_CONFIG) {
1549			hci_connect_cfm(conn, status);
1550			hci_conn_drop(conn);
1551		}
1552	}
1553
1554	hci_dev_unlock(hdev);
1555}
1556
1557static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1558{
1559	struct hci_cp_set_conn_encrypt *cp;
1560	struct hci_conn *conn;
1561
1562	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1563
1564	if (!status)
1565		return;
1566
1567	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1568	if (!cp)
1569		return;
1570
1571	hci_dev_lock(hdev);
1572
1573	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1574	if (conn) {
1575		if (conn->state == BT_CONFIG) {
1576			hci_connect_cfm(conn, status);
1577			hci_conn_drop(conn);
1578		}
1579	}
1580
1581	hci_dev_unlock(hdev);
1582}
1583
1584static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1585				    struct hci_conn *conn)
1586{
1587	if (conn->state != BT_CONFIG || !conn->out)
1588		return 0;
1589
1590	if (conn->pending_sec_level == BT_SECURITY_SDP)
1591		return 0;
1592
1593	/* Only request authentication for SSP connections or non-SSP
1594	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1595	 * is requested.
1596	 */
1597	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1598	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1599	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1600	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1601		return 0;
1602
1603	return 1;
1604}
1605
1606static int hci_resolve_name(struct hci_dev *hdev,
1607				   struct inquiry_entry *e)
1608{
1609	struct hci_cp_remote_name_req cp;
1610
1611	memset(&cp, 0, sizeof(cp));
1612
1613	bacpy(&cp.bdaddr, &e->data.bdaddr);
1614	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1615	cp.pscan_mode = e->data.pscan_mode;
1616	cp.clock_offset = e->data.clock_offset;
1617
1618	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1619}
1620
1621static bool hci_resolve_next_name(struct hci_dev *hdev)
1622{
1623	struct discovery_state *discov = &hdev->discovery;
1624	struct inquiry_entry *e;
1625
1626	if (list_empty(&discov->resolve))
1627		return false;
1628
1629	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1630	if (!e)
1631		return false;
1632
1633	if (hci_resolve_name(hdev, e) == 0) {
1634		e->name_state = NAME_PENDING;
1635		return true;
1636	}
1637
1638	return false;
1639}
1640
/* Common handler for remote-name resolution results. Called both for a
 * Remote Name Request Complete event and for a failed command status
 * (with @name == NULL). @conn may be NULL.
 *
 * Updates the mgmt connected state if appropriate and drives the
 * discovery state machine through its name-resolving phase.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name
	 * as it will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Kick off resolution of the next pending name, if any */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1689
/* Handle Command Status for HCI_Remote_Name_Request.
 *
 * Only failures are handled here; a successful command is finalized by
 * the Remote Name Request Complete event. On failure the pending-name
 * bookkeeping is completed right away and, for outgoing connections
 * still in BT_CONFIG, authentication is started immediately since no
 * name reply will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery machinery record that no name was obtained */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1732
1733static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1734{
1735	struct hci_cp_read_remote_features *cp;
1736	struct hci_conn *conn;
1737
1738	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1739
1740	if (!status)
1741		return;
1742
1743	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1744	if (!cp)
1745		return;
1746
1747	hci_dev_lock(hdev);
1748
1749	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1750	if (conn) {
1751		if (conn->state == BT_CONFIG) {
1752			hci_connect_cfm(conn, status);
1753			hci_conn_drop(conn);
1754		}
1755	}
1756
1757	hci_dev_unlock(hdev);
1758}
1759
1760static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1761{
1762	struct hci_cp_read_remote_ext_features *cp;
1763	struct hci_conn *conn;
1764
1765	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1766
1767	if (!status)
1768		return;
1769
1770	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1771	if (!cp)
1772		return;
1773
1774	hci_dev_lock(hdev);
1775
1776	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1777	if (conn) {
1778		if (conn->state == BT_CONFIG) {
1779			hci_connect_cfm(conn, status);
1780			hci_conn_drop(conn);
1781		}
1782	}
1783
1784	hci_dev_unlock(hdev);
1785}
1786
1787static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1788{
1789	struct hci_cp_setup_sync_conn *cp;
1790	struct hci_conn *acl, *sco;
1791	__u16 handle;
1792
1793	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1794
1795	if (!status)
1796		return;
1797
1798	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1799	if (!cp)
1800		return;
1801
1802	handle = __le16_to_cpu(cp->handle);
1803
1804	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1805
1806	hci_dev_lock(hdev);
1807
1808	acl = hci_conn_hash_lookup_handle(hdev, handle);
1809	if (acl) {
1810		sco = acl->link;
1811		if (sco) {
1812			sco->state = BT_CLOSED;
1813
1814			hci_connect_cfm(sco, status);
1815			hci_conn_del(sco);
1816		}
1817	}
1818
1819	hci_dev_unlock(hdev);
1820}
1821
1822static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1823{
1824	struct hci_cp_sniff_mode *cp;
1825	struct hci_conn *conn;
1826
1827	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1828
1829	if (!status)
1830		return;
1831
1832	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1833	if (!cp)
1834		return;
1835
1836	hci_dev_lock(hdev);
1837
1838	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1839	if (conn) {
1840		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1841
1842		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1843			hci_sco_setup(conn, status);
1844	}
1845
1846	hci_dev_unlock(hdev);
1847}
1848
1849static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1850{
1851	struct hci_cp_exit_sniff_mode *cp;
1852	struct hci_conn *conn;
1853
1854	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1855
1856	if (!status)
1857		return;
1858
1859	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1860	if (!cp)
1861		return;
1862
1863	hci_dev_lock(hdev);
1864
1865	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1866	if (conn) {
1867		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1868
1869		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1870			hci_sco_setup(conn, status);
1871	}
1872
1873	hci_dev_unlock(hdev);
1874}
1875
1876static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1877{
1878	struct hci_cp_disconnect *cp;
1879	struct hci_conn *conn;
1880
1881	if (!status)
1882		return;
1883
1884	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1885	if (!cp)
1886		return;
1887
1888	hci_dev_lock(hdev);
1889
1890	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1891	if (conn)
1892		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1893				       conn->dst_type, status);
1894
1895	hci_dev_unlock(hdev);
1896}
1897
/* Handle Command Status for HCI_LE_Create_Connection.
 *
 * On success, record the initiator/responder address information that
 * SMP will need later and, for directed (non-whitelist) connects, arm
 * a timeout so the attempt does not linger forever.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
				       cp->peer_addr_type);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1949
1950static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1951{
1952	struct hci_cp_le_read_remote_features *cp;
1953	struct hci_conn *conn;
1954
1955	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1956
1957	if (!status)
1958		return;
1959
1960	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1961	if (!cp)
1962		return;
1963
1964	hci_dev_lock(hdev);
1965
1966	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1967	if (conn) {
1968		if (conn->state == BT_CONFIG) {
1969			hci_connect_cfm(conn, status);
1970			hci_conn_drop(conn);
1971		}
1972	}
1973
1974	hci_dev_unlock(hdev);
1975}
1976
/* Handle Command Status for HCI_LE_Start_Encryption.
 *
 * A failed status means encryption could not even be started; an
 * established connection can no longer be trusted, so it is torn down
 * with an authentication-failure reason.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Connections not fully established yet are cleaned up via
	 * their own setup/failure paths, not here.
	 */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2006
2007static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2008{
2009	struct hci_cp_switch_role *cp;
2010	struct hci_conn *conn;
2011
2012	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2013
2014	if (!status)
2015		return;
2016
2017	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2018	if (!cp)
2019		return;
2020
2021	hci_dev_lock(hdev);
2022
2023	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2024	if (conn)
2025		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2026
2027	hci_dev_unlock(hdev);
2028}
2029
/* Handle the Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag (waking any waiters) and, when
 * mgmt-driven discovery is active, either moves on to resolving remote
 * names or marks discovery as stopped - taking care not to stop early
 * while a simultaneous LE scan is still running.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* There are names left to resolve; start with the first one */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2088
2089static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2090{
2091	struct inquiry_data data;
2092	struct inquiry_info *info = (void *) (skb->data + 1);
2093	int num_rsp = *((__u8 *) skb->data);
2094
2095	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2096
2097	if (!num_rsp)
2098		return;
2099
2100	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2101		return;
2102
2103	hci_dev_lock(hdev);
2104
2105	for (; num_rsp; num_rsp--, info++) {
2106		u32 flags;
2107
2108		bacpy(&data.bdaddr, &info->bdaddr);
2109		data.pscan_rep_mode	= info->pscan_rep_mode;
2110		data.pscan_period_mode	= info->pscan_period_mode;
2111		data.pscan_mode		= info->pscan_mode;
2112		memcpy(data.dev_class, info->dev_class, 3);
2113		data.clock_offset	= info->clock_offset;
2114		data.rssi		= HCI_RSSI_INVALID;
2115		data.ssp_mode		= 0x00;
2116
2117		flags = hci_inquiry_cache_update(hdev, &data, false);
2118
2119		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2120				  info->dev_class, HCI_RSSI_INVALID,
2121				  flags, NULL, 0, NULL, 0);
2122	}
2123
2124	hci_dev_unlock(hdev);
2125}
2126
/* Handle the Connection Complete event for BR/EDR ACL and SCO links.
 *
 * On success: store the handle, move the connection to BT_CONFIG (ACL)
 * or BT_CONNECTED (SCO), create debugfs/sysfs entries, mirror the
 * adapter's auth/encrypt settings onto the link and kick off remote
 * feature discovery. On failure: close the connection, notify mgmt for
 * ACL links, and delete the object.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* No match for a SCO completion: the attempt may have
		 * been tracked as ESCO_LINK, so retry the lookup and
		 * fix up the type (NOTE(review): presumably covers
		 * eSCO requests completed as legacy SCO - confirm).
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) link with no stored
			 * key: allow extra time for pairing to finish.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2211
2212static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2213{
2214	struct hci_cp_reject_conn_req cp;
2215
2216	bacpy(&cp.bdaddr, bdaddr);
2217	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2218	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2219}
2220
/* Handle an incoming Connection Request event.
 *
 * The request is rejected when no protocol listener accepts it, when
 * the peer is blacklisted, or when mgmt is in use and the device is
 * neither connectable nor whitelisted. Otherwise an hci_conn is set up
 * (creating one if needed) and the request is accepted right away,
 * unless a protocol asked to defer the decision via HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Ask the protocol layers whether they accept this connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached class of device for this peer */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* A protocol deferred the decision; park the connection
		 * and let it confirm or reject later.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2315
2316static u8 hci_to_mgmt_reason(u8 err)
2317{
2318	switch (err) {
2319	case HCI_ERROR_CONNECTION_TIMEOUT:
2320		return MGMT_DEV_DISCONN_TIMEOUT;
2321	case HCI_ERROR_REMOTE_USER_TERM:
2322	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2323	case HCI_ERROR_REMOTE_POWER_OFF:
2324		return MGMT_DEV_DISCONN_REMOTE;
2325	case HCI_ERROR_LOCAL_HOST_TERM:
2326		return MGMT_DEV_DISCONN_LOCAL_HOST;
2327	default:
2328		return MGMT_DEV_DISCONN_UNKNOWN;
2329	}
2330}
2331
/* Handle the Disconnection Complete event.
 *
 * Notifies mgmt about the disconnect (or a failed disconnect attempt),
 * drops the stored link key for flushed ACL links, re-queues LE
 * auto-connect parameters so the device gets reconnected, deletes the
 * connection object and finally re-enables advertising if the lost
 * link was LE.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* Prefer reporting an authentication failure over the raw HCI
	 * disconnect reason when one was recorded on the connection.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			/* Re-arm the pending LE connection so background
			 * scanning picks the device up again.
			 */
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is gone after hci_conn_del() */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2415
/* Handle the Authentication Complete event.
 *
 * Updates the connection's security state, informs mgmt about
 * failures, and continues connection setup: for SSP links still in
 * BT_CONFIG encryption is requested next, otherwise the connection is
 * promoted to BT_CONNECTED. A pending encryption request is also
 * either resumed or failed here.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption; request it before
			 * declaring the connection fully set up.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption change was waiting on this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2485
/* Handle the Remote Name Request Complete event.
 *
 * Feeds the result into the discovery name-resolving machinery and
 * then, for outgoing connections still being configured, triggers the
 * authentication that was deferred until the name was known.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* On success, pass the (bounded) name; on failure, record that
	 * no name could be obtained.
	 */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2527
/* Completion callback for HCI_Read_Encryption_Key_Size.
 *
 * Stores the negotiated key size on the connection (falling back to
 * the maximum if the read failed) and then delivers the deferred
 * connect/encrypt notifications to the protocol layers.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		BT_ERR("%s invalid HCI Read Encryption Key Size response",
		       hdev->name);
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same as we do when this HCI command isn't
	 * supported).
	 */
	if (rp->status) {
		BT_ERR("%s failed to read key size for handle %u", hdev->name,
		       handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		/* Already connected: report the encryption change with
		 * the current encryption mode.
		 */
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2584
/* Handle the HCI Encryption Change event: update the connection's security
 * state, enforce the Secure Connections Only policy and, for encrypted ACL
 * links, read the encryption key size before notifying upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 indicates AES-CCM on a BR/EDR
			 * link; encrypted LE links are treated as AES-CCM
			 * unconditionally.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is treated as
	 * fatal: tear the link down with an authentication failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			BT_ERR("Sending HCI Read Encryption Key Size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete() */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2688
2689static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2690					     struct sk_buff *skb)
2691{
2692	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2693	struct hci_conn *conn;
2694
2695	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2696
2697	hci_dev_lock(hdev);
2698
2699	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2700	if (conn) {
2701		if (!ev->status)
2702			set_bit(HCI_CONN_SECURE, &conn->flags);
2703
2704		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2705
2706		hci_key_change_cfm(conn, ev->status);
2707	}
2708
2709	hci_dev_unlock(hdev);
2710}
2711
/* Handle the HCI Read Remote Supported Features completion event: cache
 * the remote feature page 0 and drive the next step of connection setup
 * (extended features, remote name request or connected notification).
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache page 0 of the remote LMP features on success */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 first and
	 * continue setup from that event's handler.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the connection to the
	 * management interface; otherwise report it connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* Finish setup now unless an outgoing authentication round is
	 * still required.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2760
/* Handle the HCI Command Complete event: dispatch the embedded return
 * parameters to the per-opcode completion handler, re-arm command flow
 * control and wake up any request waiting on this command.
 *
 * @opcode and @status are output parameters reporting which command
 * finished and its status byte; @req_complete/@req_complete_skb receive
 * the completion callback (if any) of the request this command was
 * part of.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The first byte of the return parameters is the command status */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any completed command other than NOP resets the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Allow sending the next queued command if the controller is ready
	 * to accept one (but not while a reset is in progress).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Kick the command work queue if there is credit and work left */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3059
/* Handle the HCI Command Status event: dispatch the status to the
 * per-opcode handler, re-arm command flow control and, where no Command
 * Complete event will follow, flag the owning request as completed.
 *
 * @opcode and @status report the affected command and its status;
 * @req_complete/@req_complete_skb receive the request completion
 * callback (if any).
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any status for a command other than NOP resets the timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Allow sending the next queued command if the controller is ready
	 * to accept one (but not while a reset is in progress).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Kick the command work queue if there is credit and work left */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3162
3163static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3164{
3165	struct hci_ev_hardware_error *ev = (void *) skb->data;
3166
3167	hdev->hw_error_code = ev->code;
3168
3169	queue_work(hdev->req_workqueue, &hdev->error_reset);
3170}
3171
3172static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3173{
3174	struct hci_ev_role_change *ev = (void *) skb->data;
3175	struct hci_conn *conn;
3176
3177	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3178
3179	hci_dev_lock(hdev);
3180
3181	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3182	if (conn) {
3183		if (!ev->status)
3184			conn->role = ev->role;
3185
3186		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3187
3188		hci_role_switch_cfm(conn, ev->status, ev->role);
3189	}
3190
3191	hci_dev_unlock(hdev);
3192}
3193
3194static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3195{
3196	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3197	int i;
3198
3199	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3200		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3201		return;
3202	}
3203
3204	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3205	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3206		BT_DBG("%s bad parameters", hdev->name);
3207		return;
3208	}
3209
3210	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3211
3212	for (i = 0; i < ev->num_hndl; i++) {
3213		struct hci_comp_pkts_info *info = &ev->handles[i];
3214		struct hci_conn *conn;
3215		__u16  handle, count;
3216
3217		handle = __le16_to_cpu(info->handle);
3218		count  = __le16_to_cpu(info->count);
3219
3220		conn = hci_conn_hash_lookup_handle(hdev, handle);
3221		if (!conn)
3222			continue;
3223
3224		conn->sent -= count;
3225
3226		switch (conn->type) {
3227		case ACL_LINK:
3228			hdev->acl_cnt += count;
3229			if (hdev->acl_cnt > hdev->acl_pkts)
3230				hdev->acl_cnt = hdev->acl_pkts;
3231			break;
3232
3233		case LE_LINK:
3234			if (hdev->le_pkts) {
3235				hdev->le_cnt += count;
3236				if (hdev->le_cnt > hdev->le_pkts)
3237					hdev->le_cnt = hdev->le_pkts;
3238			} else {
3239				hdev->acl_cnt += count;
3240				if (hdev->acl_cnt > hdev->acl_pkts)
3241					hdev->acl_cnt = hdev->acl_pkts;
3242			}
3243			break;
3244
3245		case SCO_LINK:
3246			hdev->sco_cnt += count;
3247			if (hdev->sco_cnt > hdev->sco_pkts)
3248				hdev->sco_cnt = hdev->sco_pkts;
3249			break;
3250
3251		default:
3252			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3253			break;
3254		}
3255	}
3256
3257	queue_work(hdev->workqueue, &hdev->tx_work);
3258}
3259
3260static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3261						 __u16 handle)
3262{
3263	struct hci_chan *chan;
3264
3265	switch (hdev->dev_type) {
3266	case HCI_PRIMARY:
3267		return hci_conn_hash_lookup_handle(hdev, handle);
3268	case HCI_AMP:
3269		chan = hci_chan_lookup_handle(hdev, handle);
3270		if (chan)
3271			return chan->conn;
3272		break;
3273	default:
3274		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3275		break;
3276	}
3277
3278	return NULL;
3279}
3280
3281static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3282{
3283	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3284	int i;
3285
3286	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3287		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3288		return;
3289	}
3290
3291	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3292	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3293		BT_DBG("%s bad parameters", hdev->name);
3294		return;
3295	}
3296
3297	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3298	       ev->num_hndl);
3299
3300	for (i = 0; i < ev->num_hndl; i++) {
3301		struct hci_comp_blocks_info *info = &ev->handles[i];
3302		struct hci_conn *conn = NULL;
3303		__u16  handle, block_count;
3304
3305		handle = __le16_to_cpu(info->handle);
3306		block_count = __le16_to_cpu(info->blocks);
3307
3308		conn = __hci_conn_lookup_handle(hdev, handle);
3309		if (!conn)
3310			continue;
3311
3312		conn->sent -= block_count;
3313
3314		switch (conn->type) {
3315		case ACL_LINK:
3316		case AMP_LINK:
3317			hdev->block_cnt += block_count;
3318			if (hdev->block_cnt > hdev->num_blocks)
3319				hdev->block_cnt = hdev->num_blocks;
3320			break;
3321
3322		default:
3323			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3324			break;
3325		}
3326	}
3327
3328	queue_work(hdev->workqueue, &hdev->tx_work);
3329}
3330
3331static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3332{
3333	struct hci_ev_mode_change *ev = (void *) skb->data;
3334	struct hci_conn *conn;
3335
3336	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3337
3338	hci_dev_lock(hdev);
3339
3340	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3341	if (conn) {
3342		conn->mode = ev->mode;
3343
3344		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3345					&conn->flags)) {
3346			if (conn->mode == HCI_CM_ACTIVE)
3347				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3348			else
3349				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3350		}
3351
3352		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3353			hci_sco_setup(conn, ev->status);
3354	}
3355
3356	hci_dev_unlock(hdev);
3357}
3358
3359static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3360{
3361	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3362	struct hci_conn *conn;
3363
3364	BT_DBG("%s", hdev->name);
3365
3366	hci_dev_lock(hdev);
3367
3368	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3369	if (!conn)
3370		goto unlock;
3371
3372	if (conn->state == BT_CONNECTED) {
3373		hci_conn_hold(conn);
3374		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3375		hci_conn_drop(conn);
3376	}
3377
3378	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3379	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3380		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3381			     sizeof(ev->bdaddr), &ev->bdaddr);
3382	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3383		u8 secure;
3384
3385		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3386			secure = 1;
3387		else
3388			secure = 0;
3389
3390		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3391	}
3392
3393unlock:
3394	hci_dev_unlock(hdev);
3395}
3396
3397static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3398{
3399	if (key_type == HCI_LK_CHANGED_COMBINATION)
3400		return;
3401
3402	conn->pin_length = pin_len;
3403	conn->key_type = key_type;
3404
3405	switch (key_type) {
3406	case HCI_LK_LOCAL_UNIT:
3407	case HCI_LK_REMOTE_UNIT:
3408	case HCI_LK_DEBUG_COMBINATION:
3409		return;
3410	case HCI_LK_COMBINATION:
3411		if (pin_len == 16)
3412			conn->pending_sec_level = BT_SECURITY_HIGH;
3413		else
3414			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3415		break;
3416	case HCI_LK_UNAUTH_COMBINATION_P192:
3417	case HCI_LK_UNAUTH_COMBINATION_P256:
3418		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3419		break;
3420	case HCI_LK_AUTH_COMBINATION_P192:
3421		conn->pending_sec_level = BT_SECURITY_HIGH;
3422		break;
3423	case HCI_LK_AUTH_COMBINATION_P256:
3424		conn->pending_sec_level = BT_SECURITY_FIPS;
3425		break;
3426	}
3427}
3428
/* Handle the HCI Link Key Request event: reply with a stored link key for
 * the peer if one exists and satisfies the pending security requirements,
 * otherwise send a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Stored keys are only used when the management interface owns
	 * key handling.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when the connection
		 * requested authenticated pairing (NOTE(review):
		 * auth_type & 0x01 appears to be the MITM-protection bit;
		 * confirm against the auth_type definitions).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key created from a short PIN is not
		 * acceptable for high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3488
/* Handle the HCI Link Key Notification event: record the new link key on
 * the connection, store it via the key management code and notify user
 * space about it.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;	/* this event does not carry the PIN length */

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Refresh the connection's disconnect timeout (hold + drop
	 * re-arms the idle timer — see hci_conn_drop()).
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	/* Reuse the PIN length previously recorded on the connection */
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Flag non-persistent keys so they are flushed on disconnect */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3548
3549static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3550{
3551	struct hci_ev_clock_offset *ev = (void *) skb->data;
3552	struct hci_conn *conn;
3553
3554	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3555
3556	hci_dev_lock(hdev);
3557
3558	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3559	if (conn && !ev->status) {
3560		struct inquiry_entry *ie;
3561
3562		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3563		if (ie) {
3564			ie->data.clock_offset = ev->clock_offset;
3565			ie->timestamp = jiffies;
3566		}
3567	}
3568
3569	hci_dev_unlock(hdev);
3570}
3571
3572static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3573{
3574	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3575	struct hci_conn *conn;
3576
3577	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3578
3579	hci_dev_lock(hdev);
3580
3581	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3582	if (conn && !ev->status)
3583		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3584
3585	hci_dev_unlock(hdev);
3586}
3587
3588static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3589{
3590	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3591	struct inquiry_entry *ie;
3592
3593	BT_DBG("%s", hdev->name);
3594
3595	hci_dev_lock(hdev);
3596
3597	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3598	if (ie) {
3599		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3600		ie->timestamp = jiffies;
3601	}
3602
3603	hci_dev_unlock(hdev);
3604}
3605
3606static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3607					     struct sk_buff *skb)
3608{
3609	struct inquiry_data data;
3610	int num_rsp = *((__u8 *) skb->data);
3611
3612	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3613
3614	if (!num_rsp)
3615		return;
3616
3617	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3618		return;
3619
3620	hci_dev_lock(hdev);
3621
3622	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3623		struct inquiry_info_with_rssi_and_pscan_mode *info;
3624		info = (void *) (skb->data + 1);
3625
3626		for (; num_rsp; num_rsp--, info++) {
3627			u32 flags;
3628
3629			bacpy(&data.bdaddr, &info->bdaddr);
3630			data.pscan_rep_mode	= info->pscan_rep_mode;
3631			data.pscan_period_mode	= info->pscan_period_mode;
3632			data.pscan_mode		= info->pscan_mode;
3633			memcpy(data.dev_class, info->dev_class, 3);
3634			data.clock_offset	= info->clock_offset;
3635			data.rssi		= info->rssi;
3636			data.ssp_mode		= 0x00;
3637
3638			flags = hci_inquiry_cache_update(hdev, &data, false);
3639
3640			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3641					  info->dev_class, info->rssi,
3642					  flags, NULL, 0, NULL, 0);
3643		}
3644	} else {
3645		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3646
3647		for (; num_rsp; num_rsp--, info++) {
3648			u32 flags;
3649
3650			bacpy(&data.bdaddr, &info->bdaddr);
3651			data.pscan_rep_mode	= info->pscan_rep_mode;
3652			data.pscan_period_mode	= info->pscan_period_mode;
3653			data.pscan_mode		= 0x00;
3654			memcpy(data.dev_class, info->dev_class, 3);
3655			data.clock_offset	= info->clock_offset;
3656			data.rssi		= info->rssi;
3657			data.ssp_mode		= 0x00;
3658
3659			flags = hci_inquiry_cache_update(hdev, &data, false);
3660
3661			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3662					  info->dev_class, info->rssi,
3663					  flags, NULL, 0, NULL, 0);
3664		}
3665	}
3666
3667	hci_dev_unlock(hdev);
3668}
3669
/* Handle the HCI Read Remote Extended Features completion event: cache
 * the requested feature page, derive SSP/SC support from host feature
 * page 1 and continue connection setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the reported feature page (bounds-checked) */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host features (SSP and SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the connection to the
	 * management interface; otherwise report it connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* Finish setup now unless an outgoing authentication round is
	 * still required.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3733
/* Handle HCI Synchronous Connection Complete event.
 *
 * Finalizes SCO/eSCO connection setup on success. For a set of failure
 * codes that indicate a parameter mismatch, retries the outgoing setup
 * with a downgraded packet type before declaring the connection closed.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		/* Record the handle and the actual negotiated link type */
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry the outgoing setup with a less demanding
			 * packet type. If the retry command was queued,
			 * wait for its completion event instead of
			 * failing the connection now.
			 *
			 * NOTE(review): conn->link is dereferenced without
			 * a NULL check — confirm an outgoing synchronous
			 * connection always has its ACL link set here.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3800
3801static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3802{
3803	size_t parsed = 0;
3804
3805	while (parsed < eir_len) {
3806		u8 field_len = eir[0];
3807
3808		if (field_len == 0)
3809			return parsed;
3810
3811		parsed += field_len + 1;
3812		eir += field_len + 1;
3813	}
3814
3815	return eir_len;
3816}
3817
/* Handle HCI Extended Inquiry Result event.
 *
 * Updates the inquiry cache for every response carried in the event and
 * forwards each discovered device, including its EIR data, to the
 * management interface.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	/* The event payload is a one byte response count followed by that
	 * many extended_inquiry_info structures.
	 */
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of an ongoing periodic inquiry are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		/* Extended inquiry results imply SSP support */
		data.ssp_mode		= 0x01;

		/* The name is considered known when the EIR data already
		 * carries a complete name (non-NULL lookup result).
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Trim trailing zero padding before reporting EIR data */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3867
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * For LE links, promotes the pending security level on success and
 * drives connection or authentication confirmation depending on the
 * connection state. BR/EDR links are handled via auth_complete instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed key refresh on an established link terminates it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup completes once encryption is up */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3917
3918static u8 hci_get_auth_req(struct hci_conn *conn)
3919{
3920	/* If remote requests no-bonding follow that lead */
3921	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3922	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3923		return conn->remote_auth | (conn->auth_type & 0x01);
3924
3925	/* If both remote and local have enough IO capabilities, require
3926	 * MITM protection
3927	 */
3928	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3929	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3930		return conn->remote_auth | 0x01;
3931
3932	/* No MITM protection possible so ignore remote requirement */
3933	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3934}
3935
3936static u8 bredr_oob_data_present(struct hci_conn *conn)
3937{
3938	struct hci_dev *hdev = conn->hdev;
3939	struct oob_data *data;
3940
3941	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3942	if (!data)
3943		return 0x00;
3944
3945	if (bredr_sc_enabled(hdev)) {
3946		/* When Secure Connections is enabled, then just
3947		 * return the present value stored with the OOB
3948		 * data. The stored value contains the right present
3949		 * information. However it can only be trusted when
3950		 * not in Secure Connection Only mode.
3951		 */
3952		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3953			return data->present;
3954
3955		/* When Secure Connections Only mode is enabled, then
3956		 * the P-256 values are required. If they are not
3957		 * available, then do not declare that OOB data is
3958		 * present.
3959		 */
3960		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3961		    !memcmp(data->hash256, ZERO_KEY, 16))
3962			return 0x00;
3963
3964		return 0x02;
3965	}
3966
3967	/* When Secure Connections is not enabled or actually
3968	 * not supported by the hardware, then check that if
3969	 * P-192 data values are present.
3970	 */
3971	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3972	    !memcmp(data->hash192, ZERO_KEY, 16))
3973		return 0x00;
3974
3975	return 0x01;
3976}
3977
/* Handle HCI IO Capability Request event.
 *
 * Replies with the local IO capability, authentication requirements and
 * OOB data presence when pairing is acceptable, or sends a negative
 * reply with HCI_ERROR_PAIRING_NOT_ALLOWED otherwise.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold the connection for the duration of the pairing process */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet
		 * (remote_auth still carries its 0xff "unknown" value).
		 */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4046
4047static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4048{
4049	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4050	struct hci_conn *conn;
4051
4052	BT_DBG("%s", hdev->name);
4053
4054	hci_dev_lock(hdev);
4055
4056	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4057	if (!conn)
4058		goto unlock;
4059
4060	conn->remote_cap = ev->capability;
4061	conn->remote_auth = ev->authentication;
4062
4063unlock:
4064	hci_dev_unlock(hdev);
4065}
4066
/* Handle HCI User Confirmation Request event.
 *
 * Decides whether the numeric comparison can be rejected outright,
 * auto-accepted locally (no MITM requirement on either side), or must
 * be forwarded to user space via mgmt_user_confirm_request().
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept when a delay is configured, so user
		 * space still has a window to intervene.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4141
4142static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4143					 struct sk_buff *skb)
4144{
4145	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4146
4147	BT_DBG("%s", hdev->name);
4148
4149	if (hci_dev_test_flag(hdev, HCI_MGMT))
4150		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4151}
4152
4153static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4154					struct sk_buff *skb)
4155{
4156	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4157	struct hci_conn *conn;
4158
4159	BT_DBG("%s", hdev->name);
4160
4161	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4162	if (!conn)
4163		return;
4164
4165	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4166	conn->passkey_entered = 0;
4167
4168	if (hci_dev_test_flag(hdev, HCI_MGMT))
4169		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4170					 conn->dst_type, conn->passkey_notify,
4171					 conn->passkey_entered);
4172}
4173
4174static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4175{
4176	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4177	struct hci_conn *conn;
4178
4179	BT_DBG("%s", hdev->name);
4180
4181	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4182	if (!conn)
4183		return;
4184
4185	switch (ev->type) {
4186	case HCI_KEYPRESS_STARTED:
4187		conn->passkey_entered = 0;
4188		return;
4189
4190	case HCI_KEYPRESS_ENTERED:
4191		conn->passkey_entered++;
4192		break;
4193
4194	case HCI_KEYPRESS_ERASED:
4195		conn->passkey_entered--;
4196		break;
4197
4198	case HCI_KEYPRESS_CLEARED:
4199		conn->passkey_entered = 0;
4200		break;
4201
4202	case HCI_KEYPRESS_COMPLETED:
4203		return;
4204	}
4205
4206	if (hci_dev_test_flag(hdev, HCI_MGMT))
4207		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4208					 conn->dst_type, conn->passkey_notify,
4209					 conn->passkey_entered);
4210}
4211
/* Handle HCI Simple Pairing Complete event.
 *
 * Reports pairing failures to the management interface (unless a later
 * auth_complete event will do so) and releases the reference that was
 * taken when the pairing process started.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Balance the hci_conn_hold() from the IO capability request */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4242
4243static void hci_remote_host_features_evt(struct hci_dev *hdev,
4244					 struct sk_buff *skb)
4245{
4246	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4247	struct inquiry_entry *ie;
4248	struct hci_conn *conn;
4249
4250	BT_DBG("%s", hdev->name);
4251
4252	hci_dev_lock(hdev);
4253
4254	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4255	if (conn)
4256		memcpy(conn->features[1], ev->features, 8);
4257
4258	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4259	if (ie)
4260		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4261
4262	hci_dev_unlock(hdev);
4263}
4264
/* Handle HCI Remote OOB Data Request event.
 *
 * Replies with locally stored OOB data for the remote device. When
 * Secure Connections is enabled, the extended (P-256 capable) reply is
 * used; when no OOB data is stored, a negative reply is sent.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the legacy P-192
		 * values must not be used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4318
4319#if IS_ENABLED(CONFIG_BT_HS)
4320static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4321{
4322	struct hci_ev_channel_selected *ev = (void *)skb->data;
4323	struct hci_conn *hcon;
4324
4325	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4326
4327	skb_pull(skb, sizeof(*ev));
4328
4329	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4330	if (!hcon)
4331		return;
4332
4333	amp_read_loc_assoc_final_data(hdev, hcon);
4334}
4335
4336static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4337				      struct sk_buff *skb)
4338{
4339	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4340	struct hci_conn *hcon, *bredr_hcon;
4341
4342	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4343	       ev->status);
4344
4345	hci_dev_lock(hdev);
4346
4347	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4348	if (!hcon) {
4349		hci_dev_unlock(hdev);
4350		return;
4351	}
4352
4353	if (ev->status) {
4354		hci_conn_del(hcon);
4355		hci_dev_unlock(hdev);
4356		return;
4357	}
4358
4359	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4360
4361	hcon->state = BT_CONNECTED;
4362	bacpy(&hcon->dst, &bredr_hcon->dst);
4363
4364	hci_conn_hold(hcon);
4365	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4366	hci_conn_drop(hcon);
4367
4368	hci_debugfs_create_conn(hcon);
4369	hci_conn_add_sysfs(hcon);
4370
4371	amp_physical_cfm(bredr_hcon, hcon);
4372
4373	hci_dev_unlock(hdev);
4374}
4375
/* Handle the AMP Logical Link Complete event.
 *
 * Creates an hci_chan for the new logical link and, when a BR/EDR L2CAP
 * channel is waiting on the AMP manager, confirms the logical link to
 * L2CAP with the device's block MTU.
 *
 * NOTE(review): unlike most handlers in this file, no hci_dev_lock()
 * is taken around the connection/channel manipulation here — confirm
 * this is safe in this context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4413
4414static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4415					     struct sk_buff *skb)
4416{
4417	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4418	struct hci_chan *hchan;
4419
4420	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4421	       le16_to_cpu(ev->handle), ev->status);
4422
4423	if (ev->status)
4424		return;
4425
4426	hci_dev_lock(hdev);
4427
4428	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4429	if (!hchan)
4430		goto unlock;
4431
4432	amp_destroy_logical_link(hchan, ev->reason);
4433
4434unlock:
4435	hci_dev_unlock(hdev);
4436}
4437
4438static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4439					     struct sk_buff *skb)
4440{
4441	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4442	struct hci_conn *hcon;
4443
4444	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4445
4446	if (ev->status)
4447		return;
4448
4449	hci_dev_lock(hdev);
4450
4451	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4452	if (hcon) {
4453		hcon->state = BT_CLOSED;
4454		hci_conn_del(hcon);
4455	}
4456
4457	hci_dev_unlock(hdev);
4458}
4459#endif
4460
/* Handle HCI LE Connection Complete event.
 *
 * Creates or completes the hci_conn for the new LE link, fills in the
 * initiator/responder addresses, resolves the peer's identity address
 * via its IRK, stores the connection parameters and kicks off the
 * remote features exchange where applicable. Also cleans up any
 * pending connection parameters that triggered this connection.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing connection object means an outgoing
		 * connection attempt just completed; stop its timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	/* Cache the negotiated connection parameters */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	/* The pending connection attempt succeeded; remove the action
	 * and release the reference the parameters held on the conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4622
4623static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4624					    struct sk_buff *skb)
4625{
4626	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4627	struct hci_conn *conn;
4628
4629	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4630
4631	if (ev->status)
4632		return;
4633
4634	hci_dev_lock(hdev);
4635
4636	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4637	if (conn) {
4638		conn->le_conn_interval = le16_to_cpu(ev->interval);
4639		conn->le_conn_latency = le16_to_cpu(ev->latency);
4640		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4641	}
4642
4643	hci_dev_unlock(hdev);
4644}
4645
/* This function requires the caller holds hdev->lock.
 *
 * Checks whether an advertising report from the given device should
 * trigger an outgoing LE connection attempt and, if so, initiates it.
 * Returns the new connection, or NULL when no attempt is made or the
 * attempt fails.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave devices are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4732
4733static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4734			       u8 bdaddr_type, bdaddr_t *direct_addr,
4735			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4736{
4737	struct discovery_state *d = &hdev->discovery;
4738	struct smp_irk *irk;
4739	struct hci_conn *conn;
4740	bool match;
4741	u32 flags;
4742	u8 *ptr, real_len;
4743
4744	switch (type) {
4745	case LE_ADV_IND:
4746	case LE_ADV_DIRECT_IND:
4747	case LE_ADV_SCAN_IND:
4748	case LE_ADV_NONCONN_IND:
4749	case LE_ADV_SCAN_RSP:
4750		break;
4751	default:
4752		BT_ERR_RATELIMITED("Unknown advetising packet type: 0x%02x",
4753				   type);
4754		return;
4755	}
4756
4757	/* Find the end of the data in case the report contains padded zero
4758	 * bytes at the end causing an invalid length value.
4759	 *
4760	 * When data is NULL, len is 0 so there is no need for extra ptr
4761	 * check as 'ptr < data + 0' is already false in such case.
4762	 */
4763	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4764		if (ptr + 1 + *ptr > data + len)
4765			break;
4766	}
4767
4768	real_len = ptr - data;
4769
4770	/* Adjust for actual length */
4771	if (len != real_len) {
4772		BT_ERR_RATELIMITED("%s advertising data length corrected",
4773				   hdev->name);
4774		len = real_len;
4775	}
4776
4777	/* If the direct address is present, then this report is from
4778	 * a LE Direct Advertising Report event. In that case it is
4779	 * important to see if the address is matching the local
4780	 * controller address.
4781	 */
4782	if (direct_addr) {
4783		/* Only resolvable random addresses are valid for these
4784		 * kind of reports and others can be ignored.
4785		 */
4786		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4787			return;
4788
4789		/* If the controller is not using resolvable random
4790		 * addresses, then this report can be ignored.
4791		 */
4792		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4793			return;
4794
4795		/* If the local IRK of the controller does not match
4796		 * with the resolvable random address provided, then
4797		 * this report can be ignored.
4798		 */
4799		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4800			return;
4801	}
4802
4803	/* Check if we need to convert to identity address */
4804	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4805	if (irk) {
4806		bdaddr = &irk->bdaddr;
4807		bdaddr_type = irk->addr_type;
4808	}
4809
4810	/* Check if we have been requested to connect to this device */
4811	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4812	if (conn && type == LE_ADV_IND) {
4813		/* Store report for later inclusion by
4814		 * mgmt_device_connected
4815		 */
4816		memcpy(conn->le_adv_data, data, len);
4817		conn->le_adv_data_len = len;
4818	}
4819
4820	/* Passive scanning shouldn't trigger any device found events,
4821	 * except for devices marked as CONN_REPORT for which we do send
4822	 * device found events.
4823	 */
4824	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4825		if (type == LE_ADV_DIRECT_IND)
4826			return;
4827
4828		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4829					       bdaddr, bdaddr_type))
4830			return;
4831
4832		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4833			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4834		else
4835			flags = 0;
4836		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4837				  rssi, flags, data, len, NULL, 0);
4838		return;
4839	}
4840
4841	/* When receiving non-connectable or scannable undirected
4842	 * advertising reports, this means that the remote device is
4843	 * not connectable and then clearly indicate this in the
4844	 * device found event.
4845	 *
4846	 * When receiving a scan response, then there is no way to
4847	 * know if the remote device is connectable or not. However
4848	 * since scan responses are merged with a previously seen
4849	 * advertising report, the flags field from that report
4850	 * will be used.
4851	 *
4852	 * In the really unlikely case that a controller get confused
4853	 * and just sends a scan response event, then it is marked as
4854	 * not connectable as well.
4855	 */
4856	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4857	    type == LE_ADV_SCAN_RSP)
4858		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4859	else
4860		flags = 0;
4861
4862	/* If there's nothing pending either store the data from this
4863	 * event or send an immediate device found event if the data
4864	 * should not be stored for later.
4865	 */
4866	if (!has_pending_adv_report(hdev)) {
4867		/* If the report will trigger a SCAN_REQ store it for
4868		 * later merging.
4869		 */
4870		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4871			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4872						 rssi, flags, data, len);
4873			return;
4874		}
4875
4876		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4877				  rssi, flags, data, len, NULL, 0);
4878		return;
4879	}
4880
4881	/* Check if the pending report is for the same device as the new one */
4882	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4883		 bdaddr_type == d->last_adv_addr_type);
4884
4885	/* If the pending data doesn't match this report or this isn't a
4886	 * scan response (e.g. we got a duplicate ADV_IND) then force
4887	 * sending of the pending data.
4888	 */
4889	if (type != LE_ADV_SCAN_RSP || !match) {
4890		/* Send out whatever is in the cache, but skip duplicates */
4891		if (!match)
4892			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4893					  d->last_adv_addr_type, NULL,
4894					  d->last_adv_rssi, d->last_adv_flags,
4895					  d->last_adv_data,
4896					  d->last_adv_data_len, NULL, 0);
4897
4898		/* If the new report will trigger a SCAN_REQ store it for
4899		 * later merging.
4900		 */
4901		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4902			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4903						 rssi, flags, data, len);
4904			return;
4905		}
4906
4907		/* The advertising reports cannot be merged, so clear
4908		 * the pending report and send out a device found event.
4909		 */
4910		clear_pending_adv_report(hdev);
4911		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4912				  rssi, flags, data, len, NULL, 0);
4913		return;
4914	}
4915
4916	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4917	 * the new event is a SCAN_RSP. We can therefore proceed with
4918	 * sending a merged device found event.
4919	 */
4920	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4921			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4922			  d->last_adv_data, d->last_adv_data_len, data, len);
4923	clear_pending_adv_report(hdev);
4924}
4925
4926static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4927{
4928	u8 num_reports = skb->data[0];
4929	void *ptr = &skb->data[1];
4930
4931	hci_dev_lock(hdev);
4932
4933	while (num_reports--) {
4934		struct hci_ev_le_advertising_info *ev = ptr;
4935		s8 rssi;
4936
4937		rssi = ev->data[ev->length];
4938		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4939				   ev->bdaddr_type, NULL, 0, rssi,
4940				   ev->data, ev->length);
4941
4942		ptr += sizeof(*ev) + ev->length + 1;
4943	}
4944
4945	hci_dev_unlock(hdev);
4946}
4947
/* Handle the HCI LE Read Remote Features Complete event.
 *
 * On success the remote LE feature page is cached on the connection.
 * If the connection is still in BT_CONFIG, the feature exchange was
 * the last step of connection setup, so the connection is moved to
 * BT_CONNECTED and the upper layers are notified.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* Only cache the remote feature page on success */
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			/* NOTE(review): presumably releases the reference
			 * held while the connection was being set up —
			 * confirm against hci_conn refcounting rules.
			 */
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
4989
/* Handle the HCI LE Long Term Key Request event.
 *
 * The controller asks for the key needed to start encryption on a
 * connection. Look up the stored LTK for the peer and reply with
 * HCI_OP_LE_LTK_REPLY when a key matching the request (EDiv/Rand
 * check for legacy keys, zero EDiv/Rand for Secure Connections keys)
 * is found, otherwise send HCI_OP_LE_LTK_NEG_REPLY.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it out to the full 128-bit field
	 * expected by the controller.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* The STK is single-use: remove it from the key list */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller encryption cannot start */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5053
5054static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5055				      u8 reason)
5056{
5057	struct hci_cp_le_conn_param_req_neg_reply cp;
5058
5059	cp.handle = cpu_to_le16(handle);
5060	cp.reason = reason;
5061
5062	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5063		     &cp);
5064}
5065
5066static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5067					     struct sk_buff *skb)
5068{
5069	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5070	struct hci_cp_le_conn_param_req_reply cp;
5071	struct hci_conn *hcon;
5072	u16 handle, min, max, latency, timeout;
5073
5074	handle = le16_to_cpu(ev->handle);
5075	min = le16_to_cpu(ev->interval_min);
5076	max = le16_to_cpu(ev->interval_max);
5077	latency = le16_to_cpu(ev->latency);
5078	timeout = le16_to_cpu(ev->timeout);
5079
5080	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5081	if (!hcon || hcon->state != BT_CONNECTED)
5082		return send_conn_param_neg_reply(hdev, handle,
5083						 HCI_ERROR_UNKNOWN_CONN_ID);
5084
5085	if (hci_check_conn_params(min, max, latency, timeout))
5086		return send_conn_param_neg_reply(hdev, handle,
5087						 HCI_ERROR_INVALID_LL_PARAMS);
5088
5089	if (hcon->role == HCI_ROLE_MASTER) {
5090		struct hci_conn_params *params;
5091		u8 store_hint;
5092
5093		hci_dev_lock(hdev);
5094
5095		params = hci_conn_params_lookup(hdev, &hcon->dst,
5096						hcon->dst_type);
5097		if (params) {
5098			params->conn_min_interval = min;
5099			params->conn_max_interval = max;
5100			params->conn_latency = latency;
5101			params->supervision_timeout = timeout;
5102			store_hint = 0x01;
5103		} else{
5104			store_hint = 0x00;
5105		}
5106
5107		hci_dev_unlock(hdev);
5108
5109		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5110				    store_hint, min, max, latency, timeout);
5111	}
5112
5113	cp.handle = ev->handle;
5114	cp.interval_min = ev->interval_min;
5115	cp.interval_max = ev->interval_max;
5116	cp.latency = ev->latency;
5117	cp.timeout = ev->timeout;
5118	cp.min_ce_len = 0;
5119	cp.max_ce_len = 0;
5120
5121	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5122}
5123
5124static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5125					 struct sk_buff *skb)
5126{
5127	u8 num_reports = skb->data[0];
5128	void *ptr = &skb->data[1];
5129
5130	hci_dev_lock(hdev);
5131
5132	while (num_reports--) {
5133		struct hci_ev_le_direct_adv_info *ev = ptr;
5134
5135		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5136				   ev->bdaddr_type, &ev->direct_addr,
5137				   ev->direct_addr_type, ev->rssi, NULL, 0);
5138
5139		ptr += sizeof(*ev);
5140	}
5141
5142	hci_dev_unlock(hdev);
5143}
5144
/* Handle the HCI LE Meta event: strip the meta-event header from the
 * skb and dispatch to the handler for the indicated subevent. Unknown
 * subevents are silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		break;
	}
}
5184
/* Check whether @skb is the event that completes the command given by
 * @opcode/@event. When @event is non-zero the command completes with
 * that specific event type; otherwise a Command Complete event with a
 * matching opcode is required.
 *
 * Note that this consumes headers from @skb via skb_pull(), leaving
 * it positioned at the event parameters when true is returned.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* A request may specify a particular event as its terminator;
	 * in that case only the event type needs to match.
	 */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
5229
/* Main HCI event dispatcher.
 *
 * Called with an skb containing a complete HCI event packet. The
 * event is routed to the matching handler and, if it completes a
 * pending command/request, the stored completion callback is invoked.
 * Both @skb and any clone made here are consumed before returning.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the currently sent command was waiting for this specific
	 * event type, treat the event as completing that command and
	 * fetch its completion callbacks.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Dispatch on the event type; handlers see the skb positioned
	 * at the event parameters.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Run any request completion collected above. For the skb
	 * variant, only hand over the pristine clone when it really is
	 * the matching completion event.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}