   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   4   Copyright 2023 NXP
   5
   6   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   7
   8   This program is free software; you can redistribute it and/or modify
   9   it under the terms of the GNU General Public License version 2 as
  10   published by the Free Software Foundation;
  11
  12   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  13   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  15   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  16   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  17   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  18   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  19   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  20
  21   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  22   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  23   SOFTWARE IS DISCLAIMED.
  24*/
  25
  26/* Bluetooth HCI event handling. */
  27
  28#include <asm/unaligned.h>
  29#include <linux/crypto.h>
  30#include <crypto/algapi.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "hci_request.h"
  37#include "hci_debugfs.h"
  38#include "hci_codec.h"
  39#include "a2mp.h"
  40#include "amp.h"
  41#include "smp.h"
  42#include "msft.h"
  43#include "eir.h"
  44
  45#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
  46		 "\x00\x00\x00\x00\x00\x00\x00\x00"
  47
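/* Local helper to express a timeout given in seconds as jiffies, since the
 * RPA and connection timeouts used below are stored in seconds. Illustrative
 * expansion (not part of the file):
 *
 *	secs_to_jiffies(hdev->rpa_timeout)
 *		=> msecs_to_jiffies(hdev->rpa_timeout * 1000)
 */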
  48#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
  49
  50/* Handle HCI Event packets */
  51
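/* The three helpers below pull a fixed-size header off the event skb and
 * log the offending event or opcode when the packet is shorter than
 * expected. A minimal usage sketch (hypothetical caller, for illustration
 * only):
 *
 *	struct hci_ev_status *rp;
 *
 *	rp = hci_ev_skb_pull(hdev, skb, ev, sizeof(*rp));
 *	if (!rp)
 *		return;
 */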
  52static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
  53			     u8 ev, size_t len)
  54{
  55	void *data;
  56
  57	data = skb_pull_data(skb, len);
  58	if (!data)
  59		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
  60
  61	return data;
  62}
  63
  64static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
  65			     u16 op, size_t len)
  66{
  67	void *data;
  68
  69	data = skb_pull_data(skb, len);
  70	if (!data)
  71		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
  72
  73	return data;
  74}
  75
  76static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
  77				u8 ev, size_t len)
  78{
  79	void *data;
  80
  81	data = skb_pull_data(skb, len);
  82	if (!data)
  83		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
  84
  85	return data;
  86}
  87
  88static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
  89				struct sk_buff *skb)
  90{
  91	struct hci_ev_status *rp = data;
  92
  93	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
  94
  95	/* It is possible that we receive Inquiry Complete event right
  96	 * before we receive Inquiry Cancel Command Complete event, in
  97	 * which case the latter event should have status of Command
  98	 * Disallowed (0x0c). This should not be treated as an error, since
  99	 * we actually achieve what Inquiry Cancel wants to achieve,
 100	 * which is to end the last Inquiry session.
 101	 */
 102	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
 103		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
 104		rp->status = 0x00;
 105	}
 106
 107	if (rp->status)
 108		return rp->status;
 109
 110	clear_bit(HCI_INQUIRY, &hdev->flags);
 111	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 112	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 113
 114	hci_dev_lock(hdev);
 115	/* Set discovery state to stopped if we're not doing LE active
 116	 * scanning.
 117	 */
 118	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
 119	    hdev->le_scan_type != LE_SCAN_ACTIVE)
 120		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 121	hci_dev_unlock(hdev);
 122
 123	hci_conn_check_pending(hdev);
 124
 125	return rp->status;
 126}
 127
 128static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
 129			      struct sk_buff *skb)
 130{
 131	struct hci_ev_status *rp = data;
 132
 133	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 134
 135	if (rp->status)
 136		return rp->status;
 137
 138	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
 139
 140	return rp->status;
 141}
 142
 143static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
 144				   struct sk_buff *skb)
 145{
 146	struct hci_ev_status *rp = data;
 147
 148	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 149
 150	if (rp->status)
 151		return rp->status;
 152
 153	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
 154
 155	hci_conn_check_pending(hdev);
 156
 157	return rp->status;
 158}
 159
 160static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
 161					struct sk_buff *skb)
 162{
 163	struct hci_ev_status *rp = data;
 164
 165	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 166
 167	return rp->status;
 168}
 169
 170static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
 171				struct sk_buff *skb)
 172{
 173	struct hci_rp_role_discovery *rp = data;
 174	struct hci_conn *conn;
 175
 176	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 177
 178	if (rp->status)
 179		return rp->status;
 180
 181	hci_dev_lock(hdev);
 182
 183	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 184	if (conn)
 185		conn->role = rp->role;
 186
 187	hci_dev_unlock(hdev);
 188
 189	return rp->status;
 190}
 191
 192static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
 193				  struct sk_buff *skb)
 194{
 195	struct hci_rp_read_link_policy *rp = data;
 196	struct hci_conn *conn;
 197
 198	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 199
 200	if (rp->status)
 201		return rp->status;
 202
 203	hci_dev_lock(hdev);
 204
 205	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 206	if (conn)
 207		conn->link_policy = __le16_to_cpu(rp->policy);
 208
 209	hci_dev_unlock(hdev);
 210
 211	return rp->status;
 212}
 213
 214static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
 215				   struct sk_buff *skb)
 216{
 217	struct hci_rp_write_link_policy *rp = data;
 218	struct hci_conn *conn;
 219	void *sent;
 220
 221	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 222
 223	if (rp->status)
 224		return rp->status;
 225
 226	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
 227	if (!sent)
 228		return rp->status;
 229
 230	hci_dev_lock(hdev);
 231
 232	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 233	if (conn)
 234		conn->link_policy = get_unaligned_le16(sent + 2);
 235
 236	hci_dev_unlock(hdev);
 237
 238	return rp->status;
 239}
 240
 241static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
 242				      struct sk_buff *skb)
 243{
 244	struct hci_rp_read_def_link_policy *rp = data;
 245
 246	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 247
 248	if (rp->status)
 249		return rp->status;
 250
 251	hdev->link_policy = __le16_to_cpu(rp->policy);
 252
 253	return rp->status;
 254}
 255
 256static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
 257				       struct sk_buff *skb)
 258{
 259	struct hci_ev_status *rp = data;
 260	void *sent;
 261
 262	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 263
 264	if (rp->status)
 265		return rp->status;
 266
 267	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
 268	if (!sent)
 269		return rp->status;
 270
 271	hdev->link_policy = get_unaligned_le16(sent);
 272
 273	return rp->status;
 274}
 275
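/* Command Complete handler for HCI_Reset: clear all volatile flags and
 * cached controller state so the host's view matches the freshly reset
 * controller.
 */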
 276static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
 277{
 278	struct hci_ev_status *rp = data;
 279
 280	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 281
 282	clear_bit(HCI_RESET, &hdev->flags);
 283
 284	if (rp->status)
 285		return rp->status;
 286
 287	/* Reset all non-persistent flags */
 288	hci_dev_clear_volatile_flags(hdev);
 289
 290	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 291
 292	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
 293	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 294
 295	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
 296	hdev->adv_data_len = 0;
 297
 298	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
 299	hdev->scan_rsp_data_len = 0;
 300
 301	hdev->le_scan_type = LE_SCAN_PASSIVE;
 302
 303	hdev->ssp_debug_mode = 0;
 304
 305	hci_bdaddr_list_clear(&hdev->le_accept_list);
 306	hci_bdaddr_list_clear(&hdev->le_resolv_list);
 307
 308	return rp->status;
 309}
 310
 311static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
 312				      struct sk_buff *skb)
 313{
 314	struct hci_rp_read_stored_link_key *rp = data;
 315	struct hci_cp_read_stored_link_key *sent;
 316
 317	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 318
 319	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
 320	if (!sent)
 321		return rp->status;
 322
 323	if (!rp->status && sent->read_all == 0x01) {
 324		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
 325		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
 326	}
 327
 328	return rp->status;
 329}
 330
 331static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
 332					struct sk_buff *skb)
 333{
 334	struct hci_rp_delete_stored_link_key *rp = data;
 335	u16 num_keys;
 336
 337	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 338
 339	if (rp->status)
 340		return rp->status;
 341
 342	num_keys = le16_to_cpu(rp->num_keys);
 343
 344	if (num_keys <= hdev->stored_num_keys)
 345		hdev->stored_num_keys -= num_keys;
 346	else
 347		hdev->stored_num_keys = 0;
 348
 349	return rp->status;
 350}
 351
 352static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
 353				  struct sk_buff *skb)
 354{
 355	struct hci_ev_status *rp = data;
 356	void *sent;
 357
 358	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 359
 360	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
 361	if (!sent)
 362		return rp->status;
 363
 364	hci_dev_lock(hdev);
 365
 366	if (hci_dev_test_flag(hdev, HCI_MGMT))
 367		mgmt_set_local_name_complete(hdev, sent, rp->status);
 368	else if (!rp->status)
 369		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 370
 371	hci_dev_unlock(hdev);
 372
 373	return rp->status;
 374}
 375
 376static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
 377				 struct sk_buff *skb)
 378{
 379	struct hci_rp_read_local_name *rp = data;
 380
 381	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 382
 383	if (rp->status)
 384		return rp->status;
 385
 386	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 387	    hci_dev_test_flag(hdev, HCI_CONFIG))
 388		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 389
 390	return rp->status;
 391}
 392
 393static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
 394				   struct sk_buff *skb)
 395{
 396	struct hci_ev_status *rp = data;
 397	void *sent;
 398
 399	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 400
 401	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
 402	if (!sent)
 403		return rp->status;
 404
 405	hci_dev_lock(hdev);
 406
 407	if (!rp->status) {
 408		__u8 param = *((__u8 *) sent);
 409
 410		if (param == AUTH_ENABLED)
 411			set_bit(HCI_AUTH, &hdev->flags);
 412		else
 413			clear_bit(HCI_AUTH, &hdev->flags);
 414	}
 415
 416	if (hci_dev_test_flag(hdev, HCI_MGMT))
 417		mgmt_auth_enable_complete(hdev, rp->status);
 418
 419	hci_dev_unlock(hdev);
 420
 421	return rp->status;
 422}
 423
 424static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
 425				    struct sk_buff *skb)
 426{
 427	struct hci_ev_status *rp = data;
 428	__u8 param;
 429	void *sent;
 430
 431	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 432
 433	if (rp->status)
 434		return rp->status;
 435
 436	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
 437	if (!sent)
 438		return rp->status;
 439
 440	param = *((__u8 *) sent);
 441
 442	if (param)
 443		set_bit(HCI_ENCRYPT, &hdev->flags);
 444	else
 445		clear_bit(HCI_ENCRYPT, &hdev->flags);
 446
 447	return rp->status;
 448}
 449
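/* Command Complete handler for HCI_Write_Scan_Enable: mirror the requested
 * inquiry/page scan bits into hdev->flags, or reset the cached discoverable
 * timeout if the command failed.
 */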
 450static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
 451				   struct sk_buff *skb)
 452{
 453	struct hci_ev_status *rp = data;
 454	__u8 param;
 455	void *sent;
 456
 457	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 458
 459	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 460	if (!sent)
 461		return rp->status;
 462
 463	param = *((__u8 *) sent);
 464
 465	hci_dev_lock(hdev);
 466
 467	if (rp->status) {
 468		hdev->discov_timeout = 0;
 469		goto done;
 470	}
 471
 472	if (param & SCAN_INQUIRY)
 473		set_bit(HCI_ISCAN, &hdev->flags);
 474	else
 475		clear_bit(HCI_ISCAN, &hdev->flags);
 476
 477	if (param & SCAN_PAGE)
 478		set_bit(HCI_PSCAN, &hdev->flags);
 479	else
 480		clear_bit(HCI_PSCAN, &hdev->flags);
 481
 482done:
 483	hci_dev_unlock(hdev);
 484
 485	return rp->status;
 486}
 487
 488static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
 489				  struct sk_buff *skb)
 490{
 491	struct hci_ev_status *rp = data;
 492	struct hci_cp_set_event_filter *cp;
 493	void *sent;
 494
 495	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 496
 497	if (rp->status)
 498		return rp->status;
 499
 500	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
 501	if (!sent)
 502		return rp->status;
 503
 504	cp = (struct hci_cp_set_event_filter *)sent;
 505
 506	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
 507		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
 508	else
 509		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
 510
 511	return rp->status;
 512}
 513
 514static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
 515				   struct sk_buff *skb)
 516{
 517	struct hci_rp_read_class_of_dev *rp = data;
 518
 519	if (WARN_ON(!hdev))
 520		return HCI_ERROR_UNSPECIFIED;
 521
 522	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 523
 524	if (rp->status)
 525		return rp->status;
 526
 527	memcpy(hdev->dev_class, rp->dev_class, 3);
 528
 529	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
 530		   hdev->dev_class[1], hdev->dev_class[0]);
 531
 532	return rp->status;
 533}
 534
 535static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
 536				    struct sk_buff *skb)
 537{
 538	struct hci_ev_status *rp = data;
 539	void *sent;
 540
 541	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 542
 543	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
 544	if (!sent)
 545		return rp->status;
 546
 547	hci_dev_lock(hdev);
 548
 549	if (!rp->status)
 550		memcpy(hdev->dev_class, sent, 3);
 551
 552	if (hci_dev_test_flag(hdev, HCI_MGMT))
 553		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
 554
 555	hci_dev_unlock(hdev);
 556
 557	return rp->status;
 558}
 559
 560static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
 561				    struct sk_buff *skb)
 562{
 563	struct hci_rp_read_voice_setting *rp = data;
 564	__u16 setting;
 565
 566	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 567
 568	if (rp->status)
 569		return rp->status;
 570
 571	setting = __le16_to_cpu(rp->voice_setting);
 572
 573	if (hdev->voice_setting == setting)
 574		return rp->status;
 575
 576	hdev->voice_setting = setting;
 577
 578	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
 579
 580	if (hdev->notify)
 581		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 582
 583	return rp->status;
 584}
 585
 586static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
 587				     struct sk_buff *skb)
 588{
 589	struct hci_ev_status *rp = data;
 590	__u16 setting;
 591	void *sent;
 592
 593	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 594
 595	if (rp->status)
 596		return rp->status;
 597
 598	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
 599	if (!sent)
 600		return rp->status;
 601
 602	setting = get_unaligned_le16(sent);
 603
 604	if (hdev->voice_setting == setting)
 605		return rp->status;
 606
 607	hdev->voice_setting = setting;
 608
 609	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
 610
 611	if (hdev->notify)
 612		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 613
 614	return rp->status;
 615}
 616
 617static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
 618					struct sk_buff *skb)
 619{
 620	struct hci_rp_read_num_supported_iac *rp = data;
 621
 622	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 623
 624	if (rp->status)
 625		return rp->status;
 626
 627	hdev->num_iac = rp->num_iac;
 628
 629	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
 630
 631	return rp->status;
 632}
 633
 634static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
 635				struct sk_buff *skb)
 636{
 637	struct hci_ev_status *rp = data;
 638	struct hci_cp_write_ssp_mode *sent;
 639
 640	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 641
 642	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
 643	if (!sent)
 644		return rp->status;
 645
 646	hci_dev_lock(hdev);
 647
 648	if (!rp->status) {
 649		if (sent->mode)
 650			hdev->features[1][0] |= LMP_HOST_SSP;
 651		else
 652			hdev->features[1][0] &= ~LMP_HOST_SSP;
 653	}
 654
 655	if (!rp->status) {
 656		if (sent->mode)
 657			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
 658		else
 659			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
 660	}
 661
 662	hci_dev_unlock(hdev);
 663
 664	return rp->status;
 665}
 666
 667static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
 668				  struct sk_buff *skb)
 669{
 670	struct hci_ev_status *rp = data;
 671	struct hci_cp_write_sc_support *sent;
 672
 673	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 674
 675	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
 676	if (!sent)
 677		return rp->status;
 678
 679	hci_dev_lock(hdev);
 680
 681	if (!rp->status) {
 682		if (sent->support)
 683			hdev->features[1][0] |= LMP_HOST_SC;
 684		else
 685			hdev->features[1][0] &= ~LMP_HOST_SC;
 686	}
 687
 688	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
 689		if (sent->support)
 690			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
 691		else
 692			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
 693	}
 694
 695	hci_dev_unlock(hdev);
 696
 697	return rp->status;
 698}
 699
 700static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
 701				    struct sk_buff *skb)
 702{
 703	struct hci_rp_read_local_version *rp = data;
 704
 705	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 706
 707	if (rp->status)
 708		return rp->status;
 709
 710	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 711	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 712		hdev->hci_ver = rp->hci_ver;
 713		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
 714		hdev->lmp_ver = rp->lmp_ver;
 715		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
 716		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 717	}
 718
 719	return rp->status;
 720}
 721
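/* Command Complete handler for HCI_Read_Encryption_Key_Size: cache the key
 * size on the matching connection. A failed read forces the key size to 0;
 * a key shorter than hdev->min_enc_key_size is reported to hci_encrypt_cfm()
 * as an authentication failure so the link can be torn down.
 */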
 722static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
 723				   struct sk_buff *skb)
 724{
 725	struct hci_rp_read_enc_key_size *rp = data;
 726	struct hci_conn *conn;
 727	u16 handle;
 728	u8 status = rp->status;
 729
 730	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 731
 732	handle = le16_to_cpu(rp->handle);
 733
 734	hci_dev_lock(hdev);
 735
 736	conn = hci_conn_hash_lookup_handle(hdev, handle);
 737	if (!conn) {
 738		status = 0xFF;
 739		goto done;
 740	}
 741
 742	/* While unexpected, the read_enc_key_size command may fail. The most
 743	 * secure approach is to then assume the key size is 0 to force a
 744	 * disconnection.
 745	 */
 746	if (status) {
 747		bt_dev_err(hdev, "failed to read key size for handle %u",
 748			   handle);
 749		conn->enc_key_size = 0;
 750	} else {
 751		conn->enc_key_size = rp->key_size;
 752		status = 0;
 753
 754		if (conn->enc_key_size < hdev->min_enc_key_size) {
  755			/* In the slave role, conn->state has already been set
  756			 * to BT_CONNECTED and the L2CAP connection request may
  757			 * not have been received yet; at this point the L2CAP
  758			 * layer does almost nothing with the non-zero status.
  759			 * So we also clear the encryption-related bits, and the
  760			 * handler of the L2CAP connection request will then get
  761			 * the right security state at a later time.
  762			 */
 763			status = HCI_ERROR_AUTH_FAILURE;
 764			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
 765			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
 766		}
 767	}
 768
 769	hci_encrypt_cfm(conn, status);
 770
 771done:
 772	hci_dev_unlock(hdev);
 773
 774	return status;
 775}
 776
 777static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
 778				     struct sk_buff *skb)
 779{
 780	struct hci_rp_read_local_commands *rp = data;
 781
 782	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 783
 784	if (rp->status)
 785		return rp->status;
 786
 787	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 788	    hci_dev_test_flag(hdev, HCI_CONFIG))
 789		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 790
 791	return rp->status;
 792}
 793
 794static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
 795					   struct sk_buff *skb)
 796{
 797	struct hci_rp_read_auth_payload_to *rp = data;
 798	struct hci_conn *conn;
 799
 800	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 801
 802	if (rp->status)
 803		return rp->status;
 804
 805	hci_dev_lock(hdev);
 806
 807	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 808	if (conn)
 809		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
 810
 811	hci_dev_unlock(hdev);
 812
 813	return rp->status;
 814}
 815
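/* Command Complete handler for HCI_Write_Authenticated_Payload_Timeout:
 * cache the new timeout on the matching connection, or report 0xff if the
 * handle no longer resolves to one.
 */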
 816static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
 817					    struct sk_buff *skb)
 818{
 819	struct hci_rp_write_auth_payload_to *rp = data;
 820	struct hci_conn *conn;
 821	void *sent;
 822
 823	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 824
 825	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
 826	if (!sent)
 827		return rp->status;
 828
 829	hci_dev_lock(hdev);
 830
 831	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 832	if (!conn) {
 833		rp->status = 0xff;
 834		goto unlock;
 835	}
 836
 837	if (!rp->status)
 838		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
 839
 840unlock:
 841	hci_dev_unlock(hdev);
 842
 843	return rp->status;
 844}
 845
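/* Command Complete handler for HCI_Read_Local_Supported_Features: cache the
 * feature mask and derive the ACL, SCO and eSCO packet types the controller
 * supports.
 */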
 846static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
 847				     struct sk_buff *skb)
 848{
 849	struct hci_rp_read_local_features *rp = data;
 850
 851	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 852
 853	if (rp->status)
 854		return rp->status;
 855
 856	memcpy(hdev->features, rp->features, 8);
 857
  858	/* Adjust default settings according to features
  859	 * supported by the device. */
 860
 861	if (hdev->features[0][0] & LMP_3SLOT)
 862		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
 863
 864	if (hdev->features[0][0] & LMP_5SLOT)
 865		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
 866
 867	if (hdev->features[0][1] & LMP_HV2) {
 868		hdev->pkt_type  |= (HCI_HV2);
 869		hdev->esco_type |= (ESCO_HV2);
 870	}
 871
 872	if (hdev->features[0][1] & LMP_HV3) {
 873		hdev->pkt_type  |= (HCI_HV3);
 874		hdev->esco_type |= (ESCO_HV3);
 875	}
 876
 877	if (lmp_esco_capable(hdev))
 878		hdev->esco_type |= (ESCO_EV3);
 879
 880	if (hdev->features[0][4] & LMP_EV4)
 881		hdev->esco_type |= (ESCO_EV4);
 882
 883	if (hdev->features[0][4] & LMP_EV5)
 884		hdev->esco_type |= (ESCO_EV5);
 885
 886	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
 887		hdev->esco_type |= (ESCO_2EV3);
 888
 889	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
 890		hdev->esco_type |= (ESCO_3EV3);
 891
 892	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
 893		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
 894
 895	return rp->status;
 896}
 897
 898static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
 899					 struct sk_buff *skb)
 900{
 901	struct hci_rp_read_local_ext_features *rp = data;
 902
 903	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 904
 905	if (rp->status)
 906		return rp->status;
 907
 908	if (hdev->max_page < rp->max_page) {
 909		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
 910			     &hdev->quirks))
 911			bt_dev_warn(hdev, "broken local ext features page 2");
 912		else
 913			hdev->max_page = rp->max_page;
 914	}
 915
 916	if (rp->page < HCI_MAX_PAGES)
 917		memcpy(hdev->features[rp->page], rp->features, 8);
 918
 919	return rp->status;
 920}
 921
 922static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
 923					struct sk_buff *skb)
 924{
 925	struct hci_rp_read_flow_control_mode *rp = data;
 926
 927	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 928
 929	if (rp->status)
 930		return rp->status;
 931
 932	hdev->flow_ctl_mode = rp->mode;
 933
 934	return rp->status;
 935}
 936
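/* Command Complete handler for HCI_Read_Buffer_Size: cache the ACL/SCO MTUs
 * and packet counts used for host-side TX flow control; the
 * HCI_QUIRK_FIXUP_BUFFER_SIZE quirk overrides bogus SCO values reported by
 * some controllers.
 */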
 937static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
 938				  struct sk_buff *skb)
 939{
 940	struct hci_rp_read_buffer_size *rp = data;
 941
 942	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 943
 944	if (rp->status)
 945		return rp->status;
 946
 947	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
 948	hdev->sco_mtu  = rp->sco_mtu;
 949	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
 950	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
 951
 952	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
 953		hdev->sco_mtu  = 64;
 954		hdev->sco_pkts = 8;
 955	}
 956
 957	hdev->acl_cnt = hdev->acl_pkts;
 958	hdev->sco_cnt = hdev->sco_pkts;
 959
 960	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
 961	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
 962
 963	return rp->status;
 964}
 965
 966static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
 967			      struct sk_buff *skb)
 968{
 969	struct hci_rp_read_bd_addr *rp = data;
 970
 971	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 972
 973	if (rp->status)
 974		return rp->status;
 975
 976	if (test_bit(HCI_INIT, &hdev->flags))
 977		bacpy(&hdev->bdaddr, &rp->bdaddr);
 978
 979	if (hci_dev_test_flag(hdev, HCI_SETUP))
 980		bacpy(&hdev->setup_addr, &rp->bdaddr);
 981
 982	return rp->status;
 983}
 984
 985static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
 986					 struct sk_buff *skb)
 987{
 988	struct hci_rp_read_local_pairing_opts *rp = data;
 989
 990	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 991
 992	if (rp->status)
 993		return rp->status;
 994
 995	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 996	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 997		hdev->pairing_opts = rp->pairing_opts;
 998		hdev->max_enc_key_size = rp->max_key_size;
 999	}
1000
1001	return rp->status;
1002}
1003
1004static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1005					 struct sk_buff *skb)
1006{
1007	struct hci_rp_read_page_scan_activity *rp = data;
1008
1009	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1010
1011	if (rp->status)
1012		return rp->status;
1013
1014	if (test_bit(HCI_INIT, &hdev->flags)) {
1015		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1016		hdev->page_scan_window = __le16_to_cpu(rp->window);
1017	}
1018
1019	return rp->status;
1020}
1021
1022static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1023					  struct sk_buff *skb)
1024{
1025	struct hci_ev_status *rp = data;
1026	struct hci_cp_write_page_scan_activity *sent;
1027
1028	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1029
1030	if (rp->status)
1031		return rp->status;
1032
1033	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1034	if (!sent)
1035		return rp->status;
1036
1037	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1038	hdev->page_scan_window = __le16_to_cpu(sent->window);
1039
1040	return rp->status;
1041}
1042
1043static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1044				     struct sk_buff *skb)
1045{
1046	struct hci_rp_read_page_scan_type *rp = data;
1047
1048	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1049
1050	if (rp->status)
1051		return rp->status;
1052
1053	if (test_bit(HCI_INIT, &hdev->flags))
1054		hdev->page_scan_type = rp->type;
1055
1056	return rp->status;
1057}
1058
1059static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1060				      struct sk_buff *skb)
1061{
1062	struct hci_ev_status *rp = data;
1063	u8 *type;
1064
1065	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1066
1067	if (rp->status)
1068		return rp->status;
1069
1070	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1071	if (type)
1072		hdev->page_scan_type = *type;
1073
1074	return rp->status;
1075}
1076
1077static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1078				      struct sk_buff *skb)
1079{
1080	struct hci_rp_read_data_block_size *rp = data;
1081
1082	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1083
1084	if (rp->status)
1085		return rp->status;
1086
1087	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1088	hdev->block_len = __le16_to_cpu(rp->block_len);
1089	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1090
1091	hdev->block_cnt = hdev->num_blocks;
1092
1093	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1094	       hdev->block_cnt, hdev->block_len);
1095
1096	return rp->status;
1097}
1098
1099static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1100			    struct sk_buff *skb)
1101{
1102	struct hci_rp_read_clock *rp = data;
1103	struct hci_cp_read_clock *cp;
1104	struct hci_conn *conn;
1105
1106	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1107
1108	if (rp->status)
1109		return rp->status;
1110
1111	hci_dev_lock(hdev);
1112
1113	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1114	if (!cp)
1115		goto unlock;
1116
1117	if (cp->which == 0x00) {
1118		hdev->clock = le32_to_cpu(rp->clock);
1119		goto unlock;
1120	}
1121
1122	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1123	if (conn) {
1124		conn->clock = le32_to_cpu(rp->clock);
1125		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1126	}
1127
1128unlock:
1129	hci_dev_unlock(hdev);
1130	return rp->status;
1131}
1132
1133static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1134				     struct sk_buff *skb)
1135{
1136	struct hci_rp_read_local_amp_info *rp = data;
1137
1138	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1139
1140	if (rp->status)
1141		return rp->status;
1142
1143	hdev->amp_status = rp->amp_status;
1144	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1145	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1146	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1147	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1148	hdev->amp_type = rp->amp_type;
1149	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1150	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1151	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1152	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1153
1154	return rp->status;
1155}
1156
1157static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1158				       struct sk_buff *skb)
1159{
1160	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1161
1162	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1163
1164	if (rp->status)
1165		return rp->status;
1166
1167	hdev->inq_tx_power = rp->tx_power;
1168
1169	return rp->status;
1170}
1171
1172static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1173					     struct sk_buff *skb)
1174{
1175	struct hci_rp_read_def_err_data_reporting *rp = data;
1176
1177	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1178
1179	if (rp->status)
1180		return rp->status;
1181
1182	hdev->err_data_reporting = rp->err_data_reporting;
1183
1184	return rp->status;
1185}
1186
1187static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1188					      struct sk_buff *skb)
1189{
1190	struct hci_ev_status *rp = data;
1191	struct hci_cp_write_def_err_data_reporting *cp;
1192
1193	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1194
1195	if (rp->status)
1196		return rp->status;
1197
1198	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1199	if (!cp)
1200		return rp->status;
1201
1202	hdev->err_data_reporting = cp->err_data_reporting;
1203
1204	return rp->status;
1205}
1206
1207static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1208				struct sk_buff *skb)
1209{
1210	struct hci_rp_pin_code_reply *rp = data;
1211	struct hci_cp_pin_code_reply *cp;
1212	struct hci_conn *conn;
1213
1214	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1215
1216	hci_dev_lock(hdev);
1217
1218	if (hci_dev_test_flag(hdev, HCI_MGMT))
1219		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1220
1221	if (rp->status)
1222		goto unlock;
1223
1224	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1225	if (!cp)
1226		goto unlock;
1227
1228	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1229	if (conn)
1230		conn->pin_length = cp->pin_len;
1231
1232unlock:
1233	hci_dev_unlock(hdev);
1234	return rp->status;
1235}
1236
1237static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1238				    struct sk_buff *skb)
1239{
1240	struct hci_rp_pin_code_neg_reply *rp = data;
1241
1242	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1243
1244	hci_dev_lock(hdev);
1245
1246	if (hci_dev_test_flag(hdev, HCI_MGMT))
1247		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1248						 rp->status);
1249
1250	hci_dev_unlock(hdev);
1251
1252	return rp->status;
1253}
1254
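/* Command Complete handler for LE_Read_Buffer_Size: cache the LE ACL data
 * packet length and count used for LE TX flow control.
 */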
1255static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1256				     struct sk_buff *skb)
1257{
1258	struct hci_rp_le_read_buffer_size *rp = data;
1259
1260	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1261
1262	if (rp->status)
1263		return rp->status;
1264
1265	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1266	hdev->le_pkts = rp->le_max_pkt;
1267
1268	hdev->le_cnt = hdev->le_pkts;
1269
1270	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1271
1272	return rp->status;
1273}
1274
1275static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1276					struct sk_buff *skb)
1277{
1278	struct hci_rp_le_read_local_features *rp = data;
1279
1280	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1281
1282	if (rp->status)
1283		return rp->status;
1284
1285	memcpy(hdev->le_features, rp->features, 8);
1286
1287	return rp->status;
1288}
1289
1290static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1291				      struct sk_buff *skb)
1292{
1293	struct hci_rp_le_read_adv_tx_power *rp = data;
1294
1295	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1296
1297	if (rp->status)
1298		return rp->status;
1299
1300	hdev->adv_tx_power = rp->tx_power;
1301
1302	return rp->status;
1303}
1304
1305static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1306				    struct sk_buff *skb)
1307{
1308	struct hci_rp_user_confirm_reply *rp = data;
1309
1310	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1311
1312	hci_dev_lock(hdev);
1313
1314	if (hci_dev_test_flag(hdev, HCI_MGMT))
1315		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1316						 rp->status);
1317
1318	hci_dev_unlock(hdev);
1319
1320	return rp->status;
1321}
1322
1323static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1324					struct sk_buff *skb)
1325{
1326	struct hci_rp_user_confirm_reply *rp = data;
1327
1328	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1329
1330	hci_dev_lock(hdev);
1331
1332	if (hci_dev_test_flag(hdev, HCI_MGMT))
1333		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1334						     ACL_LINK, 0, rp->status);
1335
1336	hci_dev_unlock(hdev);
1337
1338	return rp->status;
1339}
1340
1341static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1342				    struct sk_buff *skb)
1343{
1344	struct hci_rp_user_confirm_reply *rp = data;
1345
1346	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1347
1348	hci_dev_lock(hdev);
1349
1350	if (hci_dev_test_flag(hdev, HCI_MGMT))
1351		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1352						 0, rp->status);
1353
1354	hci_dev_unlock(hdev);
1355
1356	return rp->status;
1357}
1358
1359static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1360					struct sk_buff *skb)
1361{
1362	struct hci_rp_user_confirm_reply *rp = data;
1363
1364	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1365
1366	hci_dev_lock(hdev);
1367
1368	if (hci_dev_test_flag(hdev, HCI_MGMT))
1369		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1370						     ACL_LINK, 0, rp->status);
1371
1372	hci_dev_unlock(hdev);
1373
1374	return rp->status;
1375}
1376
1377static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1378				     struct sk_buff *skb)
1379{
1380	struct hci_rp_read_local_oob_data *rp = data;
1381
1382	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1383
1384	return rp->status;
1385}
1386
1387static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1388					 struct sk_buff *skb)
1389{
1390	struct hci_rp_read_local_oob_ext_data *rp = data;
1391
1392	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1393
1394	return rp->status;
1395}
1396
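/* Command Complete handler for LE_Set_Random_Address: cache the new address
 * and, if it matches the current RPA, clear HCI_RPA_EXPIRED and rearm the
 * RPA expiry work.
 */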
1397static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1398				    struct sk_buff *skb)
1399{
1400	struct hci_ev_status *rp = data;
1401	bdaddr_t *sent;
1402
1403	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1404
1405	if (rp->status)
1406		return rp->status;
1407
1408	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1409	if (!sent)
1410		return rp->status;
1411
1412	hci_dev_lock(hdev);
1413
1414	bacpy(&hdev->random_addr, sent);
1415
1416	if (!bacmp(&hdev->rpa, sent)) {
1417		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1418		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1419				   secs_to_jiffies(hdev->rpa_timeout));
1420	}
1421
1422	hci_dev_unlock(hdev);
1423
1424	return rp->status;
1425}
1426
1427static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1428				    struct sk_buff *skb)
1429{
1430	struct hci_ev_status *rp = data;
1431	struct hci_cp_le_set_default_phy *cp;
1432
1433	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1434
1435	if (rp->status)
1436		return rp->status;
1437
1438	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1439	if (!cp)
1440		return rp->status;
1441
1442	hci_dev_lock(hdev);
1443
1444	hdev->le_tx_def_phys = cp->tx_phys;
1445	hdev->le_rx_def_phys = cp->rx_phys;
1446
1447	hci_dev_unlock(hdev);
1448
1449	return rp->status;
1450}
1451
1452static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1453					    struct sk_buff *skb)
1454{
1455	struct hci_ev_status *rp = data;
1456	struct hci_cp_le_set_adv_set_rand_addr *cp;
1457	struct adv_info *adv;
1458
1459	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1460
1461	if (rp->status)
1462		return rp->status;
1463
1464	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
 1465	/* Update only in case of an adv instance, since handle 0x00 shall
 1466	 * keep using HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended
 1467	 * and non-extended advertising.
 1468	 */
1469	if (!cp || !cp->handle)
1470		return rp->status;
1471
1472	hci_dev_lock(hdev);
1473
1474	adv = hci_find_adv_instance(hdev, cp->handle);
1475	if (adv) {
1476		bacpy(&adv->random_addr, &cp->bdaddr);
1477		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1478			adv->rpa_expired = false;
1479			queue_delayed_work(hdev->workqueue,
1480					   &adv->rpa_expired_cb,
1481					   secs_to_jiffies(hdev->rpa_timeout));
1482		}
1483	}
1484
1485	hci_dev_unlock(hdev);
1486
1487	return rp->status;
1488}
1489
1490static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1491				   struct sk_buff *skb)
1492{
1493	struct hci_ev_status *rp = data;
1494	u8 *instance;
1495	int err;
1496
1497	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1498
1499	if (rp->status)
1500		return rp->status;
1501
1502	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1503	if (!instance)
1504		return rp->status;
1505
1506	hci_dev_lock(hdev);
1507
1508	err = hci_remove_adv_instance(hdev, *instance);
1509	if (!err)
1510		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1511					 *instance);
1512
1513	hci_dev_unlock(hdev);
1514
1515	return rp->status;
1516}
1517
1518static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1519				   struct sk_buff *skb)
1520{
1521	struct hci_ev_status *rp = data;
1522	struct adv_info *adv, *n;
1523	int err;
1524
1525	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1526
1527	if (rp->status)
1528		return rp->status;
1529
1530	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1531		return rp->status;
1532
1533	hci_dev_lock(hdev);
1534
1535	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1536		u8 instance = adv->instance;
1537
1538		err = hci_remove_adv_instance(hdev, instance);
1539		if (!err)
1540			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1541						 hdev, instance);
1542	}
1543
1544	hci_dev_unlock(hdev);
1545
1546	return rp->status;
1547}
1548
1549static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1550					struct sk_buff *skb)
1551{
1552	struct hci_rp_le_read_transmit_power *rp = data;
1553
1554	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1555
1556	if (rp->status)
1557		return rp->status;
1558
1559	hdev->min_le_tx_power = rp->min_le_tx_power;
1560	hdev->max_le_tx_power = rp->max_le_tx_power;
1561
1562	return rp->status;
1563}
1564
1565static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1566				     struct sk_buff *skb)
1567{
1568	struct hci_ev_status *rp = data;
1569	struct hci_cp_le_set_privacy_mode *cp;
1570	struct hci_conn_params *params;
1571
1572	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1573
1574	if (rp->status)
1575		return rp->status;
1576
1577	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1578	if (!cp)
1579		return rp->status;
1580
1581	hci_dev_lock(hdev);
1582
1583	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1584	if (params)
1585		WRITE_ONCE(params->privacy_mode, cp->mode);
1586
1587	hci_dev_unlock(hdev);
1588
1589	return rp->status;
1590}
1591
1592static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1593				   struct sk_buff *skb)
1594{
1595	struct hci_ev_status *rp = data;
1596	__u8 *sent;
1597
1598	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1599
1600	if (rp->status)
1601		return rp->status;
1602
1603	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1604	if (!sent)
1605		return rp->status;
1606
1607	hci_dev_lock(hdev);
1608
 1609	/* If we're doing connection initiation as peripheral, set a
 1610	 * timeout in case something goes wrong.
 1611	 */
1612	if (*sent) {
1613		struct hci_conn *conn;
1614
1615		hci_dev_set_flag(hdev, HCI_LE_ADV);
1616
1617		conn = hci_lookup_le_connect(hdev);
1618		if (conn)
1619			queue_delayed_work(hdev->workqueue,
1620					   &conn->le_conn_timeout,
1621					   conn->conn_timeout);
1622	} else {
1623		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1624	}
1625
1626	hci_dev_unlock(hdev);
1627
1628	return rp->status;
1629}
1630
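/* Command Complete handler for LE_Set_Extended_Advertising_Enable: keep the
 * per-instance enabled state and the HCI_LE_ADV flag in sync; HCI_LE_ADV is
 * only cleared once no advertising instance remains enabled.
 */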
1631static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1632				       struct sk_buff *skb)
1633{
1634	struct hci_cp_le_set_ext_adv_enable *cp;
1635	struct hci_cp_ext_adv_set *set;
1636	struct adv_info *adv = NULL, *n;
1637	struct hci_ev_status *rp = data;
1638
1639	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1640
1641	if (rp->status)
1642		return rp->status;
1643
1644	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1645	if (!cp)
1646		return rp->status;
1647
1648	set = (void *)cp->data;
1649
1650	hci_dev_lock(hdev);
1651
1652	if (cp->num_of_sets)
1653		adv = hci_find_adv_instance(hdev, set->handle);
1654
1655	if (cp->enable) {
1656		struct hci_conn *conn;
1657
1658		hci_dev_set_flag(hdev, HCI_LE_ADV);
1659
1660		if (adv && !adv->periodic)
1661			adv->enabled = true;
1662
1663		conn = hci_lookup_le_connect(hdev);
1664		if (conn)
1665			queue_delayed_work(hdev->workqueue,
1666					   &conn->le_conn_timeout,
1667					   conn->conn_timeout);
1668	} else {
1669		if (cp->num_of_sets) {
1670			if (adv)
1671				adv->enabled = false;
1672
 1673			/* If just one instance was disabled, check if any other
 1674			 * instance is still enabled before clearing HCI_LE_ADV.
 1675			 */
1676			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1677						 list) {
1678				if (adv->enabled)
1679					goto unlock;
1680			}
1681		} else {
1682			/* All instances shall be considered disabled */
1683			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1684						 list)
1685				adv->enabled = false;
1686		}
1687
1688		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1689	}
1690
1691unlock:
1692	hci_dev_unlock(hdev);
1693	return rp->status;
1694}
1695
1696static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1697				   struct sk_buff *skb)
1698{
1699	struct hci_cp_le_set_scan_param *cp;
1700	struct hci_ev_status *rp = data;
1701
1702	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1703
1704	if (rp->status)
1705		return rp->status;
1706
1707	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1708	if (!cp)
1709		return rp->status;
1710
1711	hci_dev_lock(hdev);
1712
1713	hdev->le_scan_type = cp->type;
1714
1715	hci_dev_unlock(hdev);
1716
1717	return rp->status;
1718}
1719
1720static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1721				       struct sk_buff *skb)
1722{
1723	struct hci_cp_le_set_ext_scan_params *cp;
1724	struct hci_ev_status *rp = data;
1725	struct hci_cp_le_scan_phy_params *phy_param;
1726
1727	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1728
1729	if (rp->status)
1730		return rp->status;
1731
1732	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1733	if (!cp)
1734		return rp->status;
1735
1736	phy_param = (void *)cp->data;
1737
1738	hci_dev_lock(hdev);
1739
1740	hdev->le_scan_type = phy_param->type;
1741
1742	hci_dev_unlock(hdev);
1743
1744	return rp->status;
1745}
1746
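/* During active scanning, the most recent advertising report is buffered in
 * hdev->discovery so it can be combined with a later scan response where
 * applicable, or flushed to mgmt when scanning stops. The helpers below
 * manage that one-deep cache.
 */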
1747static bool has_pending_adv_report(struct hci_dev *hdev)
1748{
1749	struct discovery_state *d = &hdev->discovery;
1750
1751	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1752}
1753
1754static void clear_pending_adv_report(struct hci_dev *hdev)
1755{
1756	struct discovery_state *d = &hdev->discovery;
1757
1758	bacpy(&d->last_adv_addr, BDADDR_ANY);
1759	d->last_adv_data_len = 0;
1760}
1761
1762static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1763				     u8 bdaddr_type, s8 rssi, u32 flags,
1764				     u8 *data, u8 len)
1765{
1766	struct discovery_state *d = &hdev->discovery;
1767
1768	if (len > max_adv_len(hdev))
1769		return;
1770
1771	bacpy(&d->last_adv_addr, bdaddr);
1772	d->last_adv_addr_type = bdaddr_type;
1773	d->last_adv_rssi = rssi;
1774	d->last_adv_flags = flags;
1775	memcpy(d->last_adv_data, data, len);
1776	d->last_adv_data_len = len;
1777}
1778
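/* Common completion logic for both the legacy and extended scan enable
 * commands: update HCI_LE_SCAN and the discovery state, and flush any
 * still-pending advertising report when scanning is disabled.
 */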
 1779static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
 1780{
 1781	hci_dev_lock(hdev);
1782
1783	switch (enable) {
1784	case LE_SCAN_ENABLE:
1785		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1786		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1787			clear_pending_adv_report(hdev);
1788		if (hci_dev_test_flag(hdev, HCI_MESH))
1789			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1790		break;
1791
1792	case LE_SCAN_DISABLE:
1793		/* We do this here instead of when setting DISCOVERY_STOPPED
1794		 * since the latter would potentially require waiting for
1795		 * inquiry to stop too.
1796		 */
1797		if (has_pending_adv_report(hdev)) {
1798			struct discovery_state *d = &hdev->discovery;
1799
1800			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1801					  d->last_adv_addr_type, NULL,
1802					  d->last_adv_rssi, d->last_adv_flags,
1803					  d->last_adv_data,
1804					  d->last_adv_data_len, NULL, 0, 0);
1805		}
1806
1807		/* Cancel this timer so that we don't try to disable scanning
1808		 * when it's already disabled.
1809		 */
1810		cancel_delayed_work(&hdev->le_scan_disable);
1811
1812		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1813
1814		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
 1815		 * interrupted scanning due to a connect request. Therefore,
 1816		 * mark discovery as stopped.
 1817		 */
1818		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1819			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1820		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1821			 hdev->discovery.state == DISCOVERY_FINDING)
1822			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1823
1824		break;
1825
1826	default:
1827		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1828			   enable);
1829		break;
1830	}
1831
1832	hci_dev_unlock(hdev);
1833}
1834
1835static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1836				    struct sk_buff *skb)
1837{
1838	struct hci_cp_le_set_scan_enable *cp;
1839	struct hci_ev_status *rp = data;
1840
1841	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1842
1843	if (rp->status)
1844		return rp->status;
1845
1846	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1847	if (!cp)
1848		return rp->status;
1849
1850	le_set_scan_enable_complete(hdev, cp->enable);
1851
1852	return rp->status;
1853}
1854
1855static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1856					struct sk_buff *skb)
1857{
1858	struct hci_cp_le_set_ext_scan_enable *cp;
1859	struct hci_ev_status *rp = data;
1860
1861	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1862
1863	if (rp->status)
1864		return rp->status;
1865
1866	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1867	if (!cp)
1868		return rp->status;
1869
1870	le_set_scan_enable_complete(hdev, cp->enable);
1871
1872	return rp->status;
1873}
1874
1875static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1876				      struct sk_buff *skb)
1877{
1878	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1879
1880	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1881		   rp->num_of_sets);
1882
1883	if (rp->status)
1884		return rp->status;
1885
1886	hdev->le_num_of_adv_sets = rp->num_of_sets;
1887
1888	return rp->status;
1889}
1890
1891static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1892					  struct sk_buff *skb)
1893{
1894	struct hci_rp_le_read_accept_list_size *rp = data;
1895
1896	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1897
1898	if (rp->status)
1899		return rp->status;
1900
1901	hdev->le_accept_list_size = rp->size;
1902
1903	return rp->status;
1904}
1905
1906static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1907				      struct sk_buff *skb)
1908{
1909	struct hci_ev_status *rp = data;
1910
1911	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1912
1913	if (rp->status)
1914		return rp->status;
1915
1916	hci_dev_lock(hdev);
1917	hci_bdaddr_list_clear(&hdev->le_accept_list);
1918	hci_dev_unlock(hdev);
1919
1920	return rp->status;
1921}
1922
1923static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1924				       struct sk_buff *skb)
1925{
1926	struct hci_cp_le_add_to_accept_list *sent;
1927	struct hci_ev_status *rp = data;
1928
1929	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1930
1931	if (rp->status)
1932		return rp->status;
1933
1934	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1935	if (!sent)
1936		return rp->status;
1937
1938	hci_dev_lock(hdev);
1939	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1940			    sent->bdaddr_type);
1941	hci_dev_unlock(hdev);
1942
 1943	return rp->status;
 1944}
1945
1946static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1947					 struct sk_buff *skb)
1948{
1949	struct hci_cp_le_del_from_accept_list *sent;
1950	struct hci_ev_status *rp = data;
1951
1952	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1953
1954	if (rp->status)
1955		return rp->status;
1956
1957	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1958	if (!sent)
1959		return rp->status;
1960
1961	hci_dev_lock(hdev);
1962	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1963			    sent->bdaddr_type);
1964	hci_dev_unlock(hdev);
1965
1966	return rp->status;
1967}
1968
1969static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1970					  struct sk_buff *skb)
1971{
1972	struct hci_rp_le_read_supported_states *rp = data;
1973
1974	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1975
1976	if (rp->status)
1977		return rp->status;
1978
1979	memcpy(hdev->le_states, rp->le_states, 8);
1980
1981	return rp->status;
1982}
1983
1984static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1985				      struct sk_buff *skb)
1986{
1987	struct hci_rp_le_read_def_data_len *rp = data;
1988
1989	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990
1991	if (rp->status)
1992		return rp->status;
1993
1994	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1995	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1996
1997	return rp->status;
1998}
1999
2000static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2001				       struct sk_buff *skb)
2002{
2003	struct hci_cp_le_write_def_data_len *sent;
2004	struct hci_ev_status *rp = data;
2005
2006	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2007
2008	if (rp->status)
2009		return rp->status;
2010
2011	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2012	if (!sent)
2013		return rp->status;
2014
2015	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2016	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2017
2018	return rp->status;
2019}
2020
2021static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2022				       struct sk_buff *skb)
2023{
2024	struct hci_cp_le_add_to_resolv_list *sent;
2025	struct hci_ev_status *rp = data;
2026
2027	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028
2029	if (rp->status)
2030		return rp->status;
2031
2032	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2033	if (!sent)
2034		return rp->status;
2035
2036	hci_dev_lock(hdev);
2037	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2038				sent->bdaddr_type, sent->peer_irk,
2039				sent->local_irk);
2040	hci_dev_unlock(hdev);
2041
2042	return rp->status;
2043}
2044
2045static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2046					 struct sk_buff *skb)
2047{
2048	struct hci_cp_le_del_from_resolv_list *sent;
2049	struct hci_ev_status *rp = data;
2050
2051	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2052
2053	if (rp->status)
2054		return rp->status;
2055
2056	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2057	if (!sent)
2058		return rp->status;
2059
2060	hci_dev_lock(hdev);
2061	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2062			    sent->bdaddr_type);
2063	hci_dev_unlock(hdev);
2064
2065	return rp->status;
2066}
2067
2068static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2069				      struct sk_buff *skb)
2070{
2071	struct hci_ev_status *rp = data;
2072
2073	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2074
2075	if (rp->status)
2076		return rp->status;
2077
2078	hci_dev_lock(hdev);
2079	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2080	hci_dev_unlock(hdev);
2081
2082	return rp->status;
2083}
2084
2085static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2086					  struct sk_buff *skb)
2087{
2088	struct hci_rp_le_read_resolv_list_size *rp = data;
2089
2090	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2091
2092	if (rp->status)
2093		return rp->status;
2094
2095	hdev->le_resolv_list_size = rp->size;
2096
2097	return rp->status;
2098}
2099
2100static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2101					       struct sk_buff *skb)
2102{
2103	struct hci_ev_status *rp = data;
2104	__u8 *sent;
2105
2106	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2107
2108	if (rp->status)
2109		return rp->status;
2110
2111	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2112	if (!sent)
2113		return rp->status;
2114
2115	hci_dev_lock(hdev);
2116
2117	if (*sent)
2118		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2119	else
2120		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2121
2122	hci_dev_unlock(hdev);
2123
2124	return rp->status;
2125}
2126
2127static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2128				      struct sk_buff *skb)
2129{
2130	struct hci_rp_le_read_max_data_len *rp = data;
2131
2132	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2133
2134	if (rp->status)
2135		return rp->status;
2136
2137	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2138	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2139	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2140	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2141
2142	return rp->status;
2143}
2144
2145static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2146					 struct sk_buff *skb)
2147{
2148	struct hci_cp_write_le_host_supported *sent;
2149	struct hci_ev_status *rp = data;
2150
2151	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2152
2153	if (rp->status)
2154		return rp->status;
2155
2156	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2157	if (!sent)
2158		return rp->status;
2159
2160	hci_dev_lock(hdev);
2161
2162	if (sent->le) {
2163		hdev->features[1][0] |= LMP_HOST_LE;
2164		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2165	} else {
2166		hdev->features[1][0] &= ~LMP_HOST_LE;
2167		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2168		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2169	}
2170
2171	if (sent->simul)
2172		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2173	else
2174		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2175
2176	hci_dev_unlock(hdev);
2177
2178	return rp->status;
2179}
2180
2181static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2182			       struct sk_buff *skb)
2183{
2184	struct hci_cp_le_set_adv_param *cp;
2185	struct hci_ev_status *rp = data;
2186
2187	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2188
2189	if (rp->status)
2190		return rp->status;
2191
2192	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2193	if (!cp)
2194		return rp->status;
2195
2196	hci_dev_lock(hdev);
2197	hdev->adv_addr_type = cp->own_address_type;
2198	hci_dev_unlock(hdev);
2199
2200	return rp->status;
2201}
2202
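/* The controller reports the actually selected TX power in the response
 * to LE Set Extended Advertising Parameters. Handle 0x00 is the legacy
 * instance stored directly in hdev; any other handle belongs to its
 * adv_info instance. The advertising data is refreshed afterwards since
 * the TX power may be part of it.
 */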
2203static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2204				   struct sk_buff *skb)
2205{
2206	struct hci_rp_le_set_ext_adv_params *rp = data;
2207	struct hci_cp_le_set_ext_adv_params *cp;
2208	struct adv_info *adv_instance;
2209
2210	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2211
2212	if (rp->status)
2213		return rp->status;
2214
2215	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2216	if (!cp)
2217		return rp->status;
2218
2219	hci_dev_lock(hdev);
2220	hdev->adv_addr_type = cp->own_addr_type;
2221	if (!cp->handle) {
2222		/* Store in hdev for instance 0 */
2223		hdev->adv_tx_power = rp->tx_power;
2224	} else {
2225		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2226		if (adv_instance)
2227			adv_instance->tx_power = rp->tx_power;
2228	}
2229	/* Update adv data as tx power is known now */
2230	hci_update_adv_data(hdev, cp->handle);
2231
2232	hci_dev_unlock(hdev);
2233
2234	return rp->status;
2235}
2236
2237static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2238			   struct sk_buff *skb)
2239{
2240	struct hci_rp_read_rssi *rp = data;
2241	struct hci_conn *conn;
2242
2243	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2244
2245	if (rp->status)
2246		return rp->status;
2247
2248	hci_dev_lock(hdev);
2249
2250	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2251	if (conn)
2252		conn->rssi = rp->rssi;
2253
2254	hci_dev_unlock(hdev);
2255
2256	return rp->status;
2257}
2258
2259static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2260			       struct sk_buff *skb)
2261{
2262	struct hci_cp_read_tx_power *sent;
2263	struct hci_rp_read_tx_power *rp = data;
2264	struct hci_conn *conn;
2265
2266	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2267
2268	if (rp->status)
2269		return rp->status;
2270
2271	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2272	if (!sent)
2273		return rp->status;
2274
2275	hci_dev_lock(hdev);
2276
2277	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2278	if (!conn)
2279		goto unlock;
2280
2281	switch (sent->type) {
2282	case 0x00:
2283		conn->tx_power = rp->tx_power;
2284		break;
2285	case 0x01:
2286		conn->max_tx_power = rp->tx_power;
2287		break;
2288	}
2289
2290unlock:
2291	hci_dev_unlock(hdev);
2292	return rp->status;
2293}
2294
2295static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2296				      struct sk_buff *skb)
2297{
2298	struct hci_ev_status *rp = data;
2299	u8 *mode;
2300
2301	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2302
2303	if (rp->status)
2304		return rp->status;
2305
2306	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2307	if (mode)
2308		hdev->ssp_debug_mode = *mode;
2309
2310	return rp->status;
2311}
2312
2313static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2314{
2315	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2316
2317	if (status) {
2318		hci_conn_check_pending(hdev);
2319		return;
2320	}
2321
2322	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2323		set_bit(HCI_INQUIRY, &hdev->flags);
2324}
2325
2326static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2327{
2328	struct hci_cp_create_conn *cp;
2329	struct hci_conn *conn;
2330
2331	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2332
2333	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2334	if (!cp)
2335		return;
2336
2337	hci_dev_lock(hdev);
2338
2339	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2340
2341	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2342
2343	if (status) {
2344		if (conn && conn->state == BT_CONNECT) {
2345			if (status != 0x0c || conn->attempt > 2) {
2346				conn->state = BT_CLOSED;
2347				hci_connect_cfm(conn, status);
2348				hci_conn_del(conn);
2349			} else
2350				conn->state = BT_CONNECT2;
2351		}
2352	} else {
2353		if (!conn) {
2354			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2355						  HCI_ROLE_MASTER);
2356			if (!conn)
2357				bt_dev_err(hdev, "no memory for new connection");
2358		}
2359	}
2360
2361	hci_dev_unlock(hdev);
2362}
2363
2364static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2365{
2366	struct hci_cp_add_sco *cp;
2367	struct hci_conn *acl;
2368	struct hci_link *link;
2369	__u16 handle;
2370
2371	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2372
2373	if (!status)
2374		return;
2375
2376	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2377	if (!cp)
2378		return;
2379
2380	handle = __le16_to_cpu(cp->handle);
2381
2382	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2383
2384	hci_dev_lock(hdev);
2385
2386	acl = hci_conn_hash_lookup_handle(hdev, handle);
2387	if (acl) {
2388		link = list_first_entry_or_null(&acl->link_list,
2389						struct hci_link, list);
2390		if (link && link->conn) {
2391			link->conn->state = BT_CLOSED;
2392
2393			hci_connect_cfm(link->conn, status);
2394			hci_conn_del(link->conn);
2395		}
2396	}
2397
2398	hci_dev_unlock(hdev);
2399}
2400
2401static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2402{
2403	struct hci_cp_auth_requested *cp;
2404	struct hci_conn *conn;
2405
2406	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2407
2408	if (!status)
2409		return;
2410
2411	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2412	if (!cp)
2413		return;
2414
2415	hci_dev_lock(hdev);
2416
2417	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2418	if (conn) {
2419		if (conn->state == BT_CONFIG) {
2420			hci_connect_cfm(conn, status);
2421			hci_conn_drop(conn);
2422		}
2423	}
2424
2425	hci_dev_unlock(hdev);
2426}
2427
2428static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2429{
2430	struct hci_cp_set_conn_encrypt *cp;
2431	struct hci_conn *conn;
2432
2433	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2434
2435	if (!status)
2436		return;
2437
2438	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2439	if (!cp)
2440		return;
2441
2442	hci_dev_lock(hdev);
2443
2444	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2445	if (conn) {
2446		if (conn->state == BT_CONFIG) {
2447			hci_connect_cfm(conn, status);
2448			hci_conn_drop(conn);
2449		}
2450	}
2451
2452	hci_dev_unlock(hdev);
2453}
2454
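/* Decide whether an outgoing connection still needs authentication before
 * it can be reported as set up. Only connections in BT_CONFIG state are
 * considered, and SDP-only security requirements never trigger it.
 */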
2455static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2456				    struct hci_conn *conn)
2457{
2458	if (conn->state != BT_CONFIG || !conn->out)
2459		return 0;
2460
2461	if (conn->pending_sec_level == BT_SECURITY_SDP)
2462		return 0;
2463
2464	/* Only request authentication for SSP connections or non-SSP
2465	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2466	 * is requested.
2467	 */
2468	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2469	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2470	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2471	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2472		return 0;
2473
2474	return 1;
2475}
2476
2477static int hci_resolve_name(struct hci_dev *hdev,
2478				   struct inquiry_entry *e)
2479{
2480	struct hci_cp_remote_name_req cp;
2481
2482	memset(&cp, 0, sizeof(cp));
2483
2484	bacpy(&cp.bdaddr, &e->data.bdaddr);
2485	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2486	cp.pscan_mode = e->data.pscan_mode;
2487	cp.clock_offset = e->data.clock_offset;
2488
2489	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2490}
2491
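/* Pick the next inquiry cache entry that still needs its name resolved
 * and issue a Remote Name Request for it. Gives up once the overall
 * name_resolve_timeout deadline has passed.
 */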
2492static bool hci_resolve_next_name(struct hci_dev *hdev)
2493{
2494	struct discovery_state *discov = &hdev->discovery;
2495	struct inquiry_entry *e;
2496
2497	if (list_empty(&discov->resolve))
2498		return false;
2499
2500	/* We should stop if we already spent too much time resolving names. */
2501	if (time_after(jiffies, discov->name_resolve_timeout)) {
2502		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2503		return false;
2504	}
2505
2506	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2507	if (!e)
2508		return false;
2509
2510	if (hci_resolve_name(hdev, e) == 0) {
2511		e->name_state = NAME_PENDING;
2512		return true;
2513	}
2514
2515	return false;
2516}
2517
2518static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2519				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2520{
2521	struct discovery_state *discov = &hdev->discovery;
2522	struct inquiry_entry *e;
2523
2524	/* Update the mgmt connected state if necessary. Be careful with
2525	 * conn objects that exist but are not (yet) connected, however:
2526	 * only those in the BT_CONFIG or BT_CONNECTED states can be
2527	 * considered connected.
2528	 */
2529	if (conn &&
2530	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2531	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2532		mgmt_device_connected(hdev, conn, name, name_len);
2533
2534	if (discov->state == DISCOVERY_STOPPED)
2535		return;
2536
2537	if (discov->state == DISCOVERY_STOPPING)
2538		goto discov_complete;
2539
2540	if (discov->state != DISCOVERY_RESOLVING)
2541		return;
2542
2543	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2544	/* If the device was not found among the devices whose names are
2545	 * still pending resolution, there is no need to continue resolving
2546	 * the next name, as that will be done upon receiving another
2547	 * Remote Name Request Complete event. */
2548	if (!e)
2549		return;
2550
2551	list_del(&e->list);
2552
2553	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2554	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2555			 name, name_len);
2556
2557	if (hci_resolve_next_name(hdev))
2558		return;
2559
2560discov_complete:
2561	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2562}
2563
2564static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2565{
2566	struct hci_cp_remote_name_req *cp;
2567	struct hci_conn *conn;
2568
2569	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2570
2571	/* If successful, wait for the name req complete event before
2572	 * checking whether authentication is needed. */
2573	if (!status)
2574		return;
2575
2576	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2577	if (!cp)
2578		return;
2579
2580	hci_dev_lock(hdev);
2581
2582	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2583
2584	if (hci_dev_test_flag(hdev, HCI_MGMT))
2585		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2586
2587	if (!conn)
2588		goto unlock;
2589
2590	if (!hci_outgoing_auth_needed(hdev, conn))
2591		goto unlock;
2592
2593	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2594		struct hci_cp_auth_requested auth_cp;
2595
2596		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2597
2598		auth_cp.handle = __cpu_to_le16(conn->handle);
2599		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2600			     sizeof(auth_cp), &auth_cp);
2601	}
2602
2603unlock:
2604	hci_dev_unlock(hdev);
2605}
2606
2607static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2608{
2609	struct hci_cp_read_remote_features *cp;
2610	struct hci_conn *conn;
2611
2612	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2613
2614	if (!status)
2615		return;
2616
2617	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2618	if (!cp)
2619		return;
2620
2621	hci_dev_lock(hdev);
2622
2623	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2624	if (conn) {
2625		if (conn->state == BT_CONFIG) {
2626			hci_connect_cfm(conn, status);
2627			hci_conn_drop(conn);
2628		}
2629	}
2630
2631	hci_dev_unlock(hdev);
2632}
2633
2634static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2635{
2636	struct hci_cp_read_remote_ext_features *cp;
2637	struct hci_conn *conn;
2638
2639	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2640
2641	if (!status)
2642		return;
2643
2644	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2645	if (!cp)
2646		return;
2647
2648	hci_dev_lock(hdev);
2649
2650	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2651	if (conn) {
2652		if (conn->state == BT_CONFIG) {
2653			hci_connect_cfm(conn, status);
2654			hci_conn_drop(conn);
2655		}
2656	}
2657
2658	hci_dev_unlock(hdev);
2659}
2660
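/* Shared failure handling for the (Enhanced) Setup Synchronous Connection
 * commands: look up the parent ACL by handle and tear down the SCO/eSCO
 * link hanging off it.
 */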
2661static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2662				       __u8 status)
2663{
2664	struct hci_conn *acl;
2665	struct hci_link *link;
2666
2667	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2668
2669	hci_dev_lock(hdev);
2670
2671	acl = hci_conn_hash_lookup_handle(hdev, handle);
2672	if (acl) {
2673		link = list_first_entry_or_null(&acl->link_list,
2674						struct hci_link, list);
2675		if (link && link->conn) {
2676			link->conn->state = BT_CLOSED;
2677
2678			hci_connect_cfm(link->conn, status);
2679			hci_conn_del(link->conn);
2680		}
2681	}
2682
2683	hci_dev_unlock(hdev);
2684}
2685
2686static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2687{
2688	struct hci_cp_setup_sync_conn *cp;
2689
2690	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2691
2692	if (!status)
2693		return;
2694
2695	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2696	if (!cp)
2697		return;
2698
2699	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2700}
2701
2702static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2703{
2704	struct hci_cp_enhanced_setup_sync_conn *cp;
2705
2706	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2707
2708	if (!status)
2709		return;
2710
2711	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2712	if (!cp)
2713		return;
2714
2715	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2716}
2717
2718static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2719{
2720	struct hci_cp_sniff_mode *cp;
2721	struct hci_conn *conn;
2722
2723	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2724
2725	if (!status)
2726		return;
2727
2728	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2729	if (!cp)
2730		return;
2731
2732	hci_dev_lock(hdev);
2733
2734	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2735	if (conn) {
2736		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2737
2738		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2739			hci_sco_setup(conn, status);
2740	}
2741
2742	hci_dev_unlock(hdev);
2743}
2744
2745static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2746{
2747	struct hci_cp_exit_sniff_mode *cp;
2748	struct hci_conn *conn;
2749
2750	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2751
2752	if (!status)
2753		return;
2754
2755	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2756	if (!cp)
2757		return;
2758
2759	hci_dev_lock(hdev);
2760
2761	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2762	if (conn) {
2763		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2764
2765		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2766			hci_sco_setup(conn, status);
2767	}
2768
2769	hci_dev_unlock(hdev);
2770}
2771
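/* Command status handler for HCI_OP_DISCONNECT. The actual cleanup
 * normally happens in hci_disconn_complete_evt(); this path only acts
 * when the command failed or the host is suspended.
 */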
2772static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2773{
2774	struct hci_cp_disconnect *cp;
2775	struct hci_conn_params *params;
2776	struct hci_conn *conn;
2777	bool mgmt_conn;
2778
2779	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2780
2781	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2782	 * otherwise cleanup the connection immediately.
2783	 */
2784	if (!status && !hdev->suspended)
2785		return;
2786
2787	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2788	if (!cp)
2789		return;
2790
2791	hci_dev_lock(hdev);
2792
2793	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2794	if (!conn)
2795		goto unlock;
2796
2797	if (status) {
2798		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2799				       conn->dst_type, status);
2800
2801		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2802			hdev->cur_adv_instance = conn->adv_instance;
2803			hci_enable_advertising(hdev);
2804		}
2805
2806		/* Inform sockets conn is gone before we delete it */
2807		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2808
2809		goto done;
2810	}
2811
2812	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2813
2814	if (conn->type == ACL_LINK) {
2815		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2816			hci_remove_link_key(hdev, &conn->dst);
2817	}
2818
2819	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2820	if (params) {
2821		switch (params->auto_connect) {
2822		case HCI_AUTO_CONN_LINK_LOSS:
2823			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2824				break;
2825			fallthrough;
2826
2827		case HCI_AUTO_CONN_DIRECT:
2828		case HCI_AUTO_CONN_ALWAYS:
2829			hci_pend_le_list_del_init(params);
2830			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2831			break;
2832
2833		default:
2834			break;
2835		}
2836	}
2837
2838	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2839				 cp->reason, mgmt_conn);
2840
2841	hci_disconn_cfm(conn, cp->reason);
2842
2843done:
2844	/* If the disconnection failed for any reason, the upper layer
2845	 * does not retry the disconnect in the current implementation.
2846	 * Hence, do some basic cleanup here and re-enable advertising
2847	 * if necessary.
2848	 */
2849	hci_conn_del(conn);
2850unlock:
2851	hci_dev_unlock(hdev);
2852}
2853
2854static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2855{
2856	/* When using controller-based address resolution, the new
2857	 * address types 0x02 and 0x03 are used. These types need to be
2858	 * converted back into either the public or the random address type.
2859	 */
2860	switch (type) {
2861	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2862		if (resolved)
2863			*resolved = true;
2864		return ADDR_LE_DEV_PUBLIC;
2865	case ADDR_LE_DEV_RANDOM_RESOLVED:
2866		if (resolved)
2867			*resolved = true;
2868		return ADDR_LE_DEV_RANDOM;
2869	}
2870
2871	if (resolved)
2872		*resolved = false;
2873	return type;
2874}
2875
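/* Record the initiator/responder address pair on the connection object as
 * soon as an LE Create Connection command is in flight. SMP relies on
 * these exact values, and they must not change for the lifetime of the
 * connection.
 */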
2876static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2877			      u8 peer_addr_type, u8 own_address_type,
2878			      u8 filter_policy)
2879{
2880	struct hci_conn *conn;
2881
2882	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2883				       peer_addr_type);
2884	if (!conn)
2885		return;
2886
2887	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2888
2889	/* Store the initiator and responder address information which
2890	 * is needed for SMP. These values will not change during the
2891	 * lifetime of the connection.
2892	 */
2893	conn->init_addr_type = own_address_type;
2894	if (own_address_type == ADDR_LE_DEV_RANDOM)
2895		bacpy(&conn->init_addr, &hdev->random_addr);
2896	else
2897		bacpy(&conn->init_addr, &hdev->bdaddr);
2898
2899	conn->resp_addr_type = peer_addr_type;
2900	bacpy(&conn->resp_addr, peer_addr);
2901}
2902
2903static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2904{
2905	struct hci_cp_le_create_conn *cp;
2906
2907	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2908
2909	/* All connection failure handling is taken care of by the
2910	 * hci_conn_failed function which is triggered by the HCI
2911	 * request completion callbacks used for connecting.
2912	 */
2913	if (status)
2914		return;
2915
2916	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2917	if (!cp)
2918		return;
2919
2920	hci_dev_lock(hdev);
2921
2922	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2923			  cp->own_address_type, cp->filter_policy);
2924
2925	hci_dev_unlock(hdev);
2926}
2927
2928static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2929{
2930	struct hci_cp_le_ext_create_conn *cp;
2931
2932	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2933
2934	/* All connection failure handling is taken care of by the
2935	 * hci_conn_failed function which is triggered by the HCI
2936	 * request completion callbacks used for connecting.
2937	 */
2938	if (status)
2939		return;
2940
2941	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2942	if (!cp)
2943		return;
2944
2945	hci_dev_lock(hdev);
2946
2947	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2948			  cp->own_addr_type, cp->filter_policy);
2949
2950	hci_dev_unlock(hdev);
2951}
2952
2953static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2954{
2955	struct hci_cp_le_read_remote_features *cp;
2956	struct hci_conn *conn;
2957
2958	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2959
2960	if (!status)
2961		return;
2962
2963	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2964	if (!cp)
2965		return;
2966
2967	hci_dev_lock(hdev);
2968
2969	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2970	if (conn) {
2971		if (conn->state == BT_CONFIG) {
2972			hci_connect_cfm(conn, status);
2973			hci_conn_drop(conn);
2974		}
2975	}
2976
2977	hci_dev_unlock(hdev);
2978}
2979
2980static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2981{
2982	struct hci_cp_le_start_enc *cp;
2983	struct hci_conn *conn;
2984
2985	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2986
2987	if (!status)
2988		return;
2989
2990	hci_dev_lock(hdev);
2991
2992	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2993	if (!cp)
2994		goto unlock;
2995
2996	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2997	if (!conn)
2998		goto unlock;
2999
3000	if (conn->state != BT_CONNECTED)
3001		goto unlock;
3002
3003	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3004	hci_conn_drop(conn);
3005
3006unlock:
3007	hci_dev_unlock(hdev);
3008}
3009
3010static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3011{
3012	struct hci_cp_switch_role *cp;
3013	struct hci_conn *conn;
3014
3015	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3016
3017	if (!status)
3018		return;
3019
3020	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3021	if (!cp)
3022		return;
3023
3024	hci_dev_lock(hdev);
3025
3026	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3027	if (conn)
3028		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3029
3030	hci_dev_unlock(hdev);
3031}
3032
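/* Inquiry Complete: wake up anyone waiting on the HCI_INQUIRY bit and,
 * for mgmt-driven discovery, either start resolving remote names or mark
 * discovery as stopped, taking a still-running simultaneous LE scan into
 * account.
 */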
3033static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3034				     struct sk_buff *skb)
3035{
3036	struct hci_ev_status *ev = data;
3037	struct discovery_state *discov = &hdev->discovery;
3038	struct inquiry_entry *e;
3039
3040	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3041
3042	hci_conn_check_pending(hdev);
3043
3044	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3045		return;
3046
3047	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3048	wake_up_bit(&hdev->flags, HCI_INQUIRY);
3049
3050	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3051		return;
3052
3053	hci_dev_lock(hdev);
3054
3055	if (discov->state != DISCOVERY_FINDING)
3056		goto unlock;
3057
3058	if (list_empty(&discov->resolve)) {
3059		/* When BR/EDR inquiry is active and no LE scanning is in
3060		 * progress, then change discovery state to indicate completion.
3061		 *
3062		 * When running LE scanning and BR/EDR inquiry simultaneously
3063		 * and the LE scan already finished, then change the discovery
3064		 * state to indicate completion.
3065		 */
3066		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3067		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3068			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3069		goto unlock;
3070	}
3071
3072	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3073	if (e && hci_resolve_name(hdev, e) == 0) {
3074		e->name_state = NAME_PENDING;
3075		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3076		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3077	} else {
3078		/* When BR/EDR inquiry is active and no LE scanning is in
3079		 * progress, then change discovery state to indicate completion.
3080		 *
3081		 * When running LE scanning and BR/EDR inquiry simultaneously
3082		 * and the LE scan already finished, then change the discovery
3083		 * state to indicate completion.
3084		 */
3085		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3086		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3087			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3088	}
3089
3090unlock:
3091	hci_dev_unlock(hdev);
3092}
3093
3094static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3095				   struct sk_buff *skb)
3096{
3097	struct hci_ev_inquiry_result *ev = edata;
3098	struct inquiry_data data;
3099	int i;
3100
3101	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3102			     flex_array_size(ev, info, ev->num)))
3103		return;
3104
3105	bt_dev_dbg(hdev, "num %d", ev->num);
3106
3107	if (!ev->num)
3108		return;
3109
3110	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3111		return;
3112
3113	hci_dev_lock(hdev);
3114
3115	for (i = 0; i < ev->num; i++) {
3116		struct inquiry_info *info = &ev->info[i];
3117		u32 flags;
3118
3119		bacpy(&data.bdaddr, &info->bdaddr);
3120		data.pscan_rep_mode	= info->pscan_rep_mode;
3121		data.pscan_period_mode	= info->pscan_period_mode;
3122		data.pscan_mode		= info->pscan_mode;
3123		memcpy(data.dev_class, info->dev_class, 3);
3124		data.clock_offset	= info->clock_offset;
3125		data.rssi		= HCI_RSSI_INVALID;
3126		data.ssp_mode		= 0x00;
3127
3128		flags = hci_inquiry_cache_update(hdev, &data, false);
3129
3130		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3131				  info->dev_class, HCI_RSSI_INVALID,
3132				  flags, NULL, 0, NULL, 0, 0);
3133	}
3134
3135	hci_dev_unlock(hdev);
3136}
3137
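/* HCI_Connection_Complete: assign the handle to the connection object,
 * move ACL links into BT_CONFIG to fetch remote features, and kick off
 * any pending SCO setup. Duplicate events for a handle that is already
 * set are discarded to avoid state corruption.
 */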
3138static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3139				  struct sk_buff *skb)
3140{
3141	struct hci_ev_conn_complete *ev = data;
3142	struct hci_conn *conn;
3143	u8 status = ev->status;
3144
3145	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3146
3147	hci_dev_lock(hdev);
3148
3149	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3150	if (!conn) {
3151		/* In case of error status and there is no connection pending
3152		 * just unlock as there is nothing to cleanup.
3153		 */
3154		if (ev->status)
3155			goto unlock;
3156
3157		/* Connection may not exist if auto-connected. Check the bredr
3158		 * allowlist to see if this device is allowed to auto connect.
3159		 * If the link is an ACL type, create the connection
3160		 * automatically.
3161		 *
3162		 * Auto-connect will only occur if the event filter is
3163		 * programmed with a given address. Right now, event filter is
3164		 * only used during suspend.
3165		 */
3166		if (ev->link_type == ACL_LINK &&
3167		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3168						      &ev->bdaddr,
3169						      BDADDR_BREDR)) {
3170			conn = hci_conn_add_unset(hdev, ev->link_type,
3171						  &ev->bdaddr, HCI_ROLE_SLAVE);
3172			if (!conn) {
3173				bt_dev_err(hdev, "no memory for new conn");
3174				goto unlock;
3175			}
3176		} else {
3177			if (ev->link_type != SCO_LINK)
3178				goto unlock;
3179
3180			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3181						       &ev->bdaddr);
3182			if (!conn)
3183				goto unlock;
3184
3185			conn->type = SCO_LINK;
3186		}
3187	}
3188
3189	/* The HCI_Connection_Complete event is only sent once per connection.
3190	 * Processing it more than once per connection can corrupt kernel memory.
3191	 *
3192	 * As the connection handle is set here for the first time, it indicates
3193	 * whether the connection is already set up.
3194	 */
3195	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3196		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3197		goto unlock;
3198	}
3199
3200	if (!status) {
3201		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3202		if (status)
3203			goto done;
3204
3205		if (conn->type == ACL_LINK) {
3206			conn->state = BT_CONFIG;
3207			hci_conn_hold(conn);
3208
3209			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3210			    !hci_find_link_key(hdev, &ev->bdaddr))
3211				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3212			else
3213				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3214		} else
3215			conn->state = BT_CONNECTED;
3216
3217		hci_debugfs_create_conn(conn);
3218		hci_conn_add_sysfs(conn);
3219
3220		if (test_bit(HCI_AUTH, &hdev->flags))
3221			set_bit(HCI_CONN_AUTH, &conn->flags);
3222
3223		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3224			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3225
3226		/* Get remote features */
3227		if (conn->type == ACL_LINK) {
3228			struct hci_cp_read_remote_features cp;
3229			cp.handle = ev->handle;
3230			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3231				     sizeof(cp), &cp);
3232
3233			hci_update_scan(hdev);
3234		}
3235
3236		/* Set packet type for incoming connection */
3237		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3238			struct hci_cp_change_conn_ptype cp;
3239			cp.handle = ev->handle;
3240			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3241			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3242				     &cp);
3243		}
3244	}
3245
3246	if (conn->type == ACL_LINK)
3247		hci_sco_setup(conn, ev->status);
3248
3249done:
3250	if (status) {
3251		hci_conn_failed(conn, status);
3252	} else if (ev->link_type == SCO_LINK) {
3253		switch (conn->setting & SCO_AIRMODE_MASK) {
3254		case SCO_AIRMODE_CVSD:
3255			if (hdev->notify)
3256				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3257			break;
3258		}
3259
3260		hci_connect_cfm(conn, status);
3261	}
3262
3263unlock:
3264	hci_dev_unlock(hdev);
3265
3266	hci_conn_check_pending(hdev);
3267}
3268
3269static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3270{
3271	struct hci_cp_reject_conn_req cp;
3272
3273	bacpy(&cp.bdaddr, bdaddr);
3274	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3275	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3276}
3277
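/* Incoming connection request: reject peers using our own BD_ADDR
 * (CVE-2020-26555) and devices on the reject list; under mgmt, also
 * require HCI_CONNECTABLE or an accept list entry. Accepted requests are
 * answered with Accept (Synchronous) Connection Request or deferred to
 * the protocol layer.
 */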
3278static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3279				 struct sk_buff *skb)
3280{
3281	struct hci_ev_conn_request *ev = data;
3282	int mask = hdev->link_mode;
3283	struct inquiry_entry *ie;
3284	struct hci_conn *conn;
3285	__u8 flags = 0;
3286
3287	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3288
3289	/* Reject incoming connection from device with same BD ADDR against
3290	 * CVE-2020-26555
3291	 */
3292	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3293		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR",
3294			   &ev->bdaddr);
3295		hci_reject_conn(hdev, &ev->bdaddr);
3296		return;
3297	}
3298
3299	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3300				      &flags);
3301
3302	if (!(mask & HCI_LM_ACCEPT)) {
3303		hci_reject_conn(hdev, &ev->bdaddr);
3304		return;
3305	}
3306
3307	hci_dev_lock(hdev);
3308
3309	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3310				   BDADDR_BREDR)) {
3311		hci_reject_conn(hdev, &ev->bdaddr);
3312		goto unlock;
3313	}
3314
3315	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3316	 * connection. These features are only touched through mgmt so
3317	 * only do the checks if HCI_MGMT is set.
3318	 */
3319	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3320	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3321	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3322					       BDADDR_BREDR)) {
3323		hci_reject_conn(hdev, &ev->bdaddr);
3324		goto unlock;
3325	}
3326
3327	/* Connection accepted */
3328
3329	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3330	if (ie)
3331		memcpy(ie->data.dev_class, ev->dev_class, 3);
3332
3333	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3334			&ev->bdaddr);
3335	if (!conn) {
3336		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3337					  HCI_ROLE_SLAVE);
3338		if (!conn) {
3339			bt_dev_err(hdev, "no memory for new connection");
3340			goto unlock;
3341		}
3342	}
3343
3344	memcpy(conn->dev_class, ev->dev_class, 3);
3345
3346	hci_dev_unlock(hdev);
3347
3348	if (ev->link_type == ACL_LINK ||
3349	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3350		struct hci_cp_accept_conn_req cp;
3351		conn->state = BT_CONNECT;
3352
3353		bacpy(&cp.bdaddr, &ev->bdaddr);
3354
3355		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3356			cp.role = 0x00; /* Become central */
3357		else
3358			cp.role = 0x01; /* Remain peripheral */
3359
3360		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3361	} else if (!(flags & HCI_PROTO_DEFER)) {
3362		struct hci_cp_accept_sync_conn_req cp;
3363		conn->state = BT_CONNECT;
3364
3365		bacpy(&cp.bdaddr, &ev->bdaddr);
3366		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3367
3368		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3369		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3370		cp.max_latency    = cpu_to_le16(0xffff);
3371		cp.content_format = cpu_to_le16(hdev->voice_setting);
3372		cp.retrans_effort = 0xff;
3373
3374		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3375			     &cp);
3376	} else {
3377		conn->state = BT_CONNECT2;
3378		hci_connect_cfm(conn, 0);
3379	}
3380
3381	return;
3382unlock:
3383	hci_dev_unlock(hdev);
3384}
3385
3386static u8 hci_to_mgmt_reason(u8 err)
3387{
3388	switch (err) {
3389	case HCI_ERROR_CONNECTION_TIMEOUT:
3390		return MGMT_DEV_DISCONN_TIMEOUT;
3391	case HCI_ERROR_REMOTE_USER_TERM:
3392	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3393	case HCI_ERROR_REMOTE_POWER_OFF:
3394		return MGMT_DEV_DISCONN_REMOTE;
3395	case HCI_ERROR_LOCAL_HOST_TERM:
3396		return MGMT_DEV_DISCONN_LOCAL_HOST;
3397	default:
3398		return MGMT_DEV_DISCONN_UNKNOWN;
3399	}
3400}
3401
3402static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3403				     struct sk_buff *skb)
3404{
3405	struct hci_ev_disconn_complete *ev = data;
3406	u8 reason;
3407	struct hci_conn_params *params;
3408	struct hci_conn *conn;
3409	bool mgmt_connected;
3410
3411	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3412
3413	hci_dev_lock(hdev);
3414
3415	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3416	if (!conn)
3417		goto unlock;
3418
3419	if (ev->status) {
3420		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3421				       conn->dst_type, ev->status);
3422		goto unlock;
3423	}
3424
3425	conn->state = BT_CLOSED;
3426
3427	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3428
3429	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3430		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3431	else
3432		reason = hci_to_mgmt_reason(ev->reason);
3433
3434	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3435				reason, mgmt_connected);
3436
3437	if (conn->type == ACL_LINK) {
3438		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3439			hci_remove_link_key(hdev, &conn->dst);
3440
3441		hci_update_scan(hdev);
3442	}
3443
3444	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3445	if (params) {
3446		switch (params->auto_connect) {
3447		case HCI_AUTO_CONN_LINK_LOSS:
3448			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3449				break;
3450			fallthrough;
3451
3452		case HCI_AUTO_CONN_DIRECT:
3453		case HCI_AUTO_CONN_ALWAYS:
3454			hci_pend_le_list_del_init(params);
3455			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3456			hci_update_passive_scan(hdev);
3457			break;
3458
3459		default:
3460			break;
3461		}
3462	}
3463
3464	hci_disconn_cfm(conn, ev->reason);
3465
3466	/* Re-enable advertising if necessary, since it might
3467	 * have been disabled by the connection. From the
3468	 * HCI_LE_Set_Advertise_Enable command description in
3469	 * the core specification (v4.0):
3470	 * "The Controller shall continue advertising until the Host
3471	 * issues an LE_Set_Advertise_Enable command with
3472	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3473	 * or until a connection is created or until the Advertising
3474	 * is timed out due to Directed Advertising."
3475	 */
3476	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3477		hdev->cur_adv_instance = conn->adv_instance;
3478		hci_enable_advertising(hdev);
3479	}
3480
3481	hci_conn_del(conn);
3482
3483unlock:
3484	hci_dev_unlock(hdev);
3485}
3486
3487static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3488				  struct sk_buff *skb)
3489{
3490	struct hci_ev_auth_complete *ev = data;
3491	struct hci_conn *conn;
3492
3493	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3494
3495	hci_dev_lock(hdev);
3496
3497	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3498	if (!conn)
3499		goto unlock;
3500
3501	if (!ev->status) {
3502		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3503		set_bit(HCI_CONN_AUTH, &conn->flags);
3504		conn->sec_level = conn->pending_sec_level;
3505	} else {
3506		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3507			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3508
3509		mgmt_auth_failed(conn, ev->status);
3510	}
3511
3512	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3513
3514	if (conn->state == BT_CONFIG) {
3515		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3516			struct hci_cp_set_conn_encrypt cp;
3517			cp.handle  = ev->handle;
3518			cp.encrypt = 0x01;
3519			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3520				     &cp);
3521		} else {
3522			conn->state = BT_CONNECTED;
3523			hci_connect_cfm(conn, ev->status);
3524			hci_conn_drop(conn);
3525		}
3526	} else {
3527		hci_auth_cfm(conn, ev->status);
3528
3529		hci_conn_hold(conn);
3530		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3531		hci_conn_drop(conn);
3532	}
3533
3534	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3535		if (!ev->status) {
3536			struct hci_cp_set_conn_encrypt cp;
3537			cp.handle  = ev->handle;
3538			cp.encrypt = 0x01;
3539			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3540				     &cp);
3541		} else {
3542			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3543			hci_encrypt_cfm(conn, ev->status);
3544		}
3545	}
3546
3547unlock:
3548	hci_dev_unlock(hdev);
3549}
3550
3551static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3552				struct sk_buff *skb)
3553{
3554	struct hci_ev_remote_name *ev = data;
3555	struct hci_conn *conn;
3556
3557	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3558
3559	hci_conn_check_pending(hdev);
3560
3561	hci_dev_lock(hdev);
3562
3563	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3564
3565	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3566		goto check_auth;
3567
3568	if (ev->status == 0)
3569		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3570				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3571	else
3572		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3573
3574check_auth:
3575	if (!conn)
3576		goto unlock;
3577
3578	if (!hci_outgoing_auth_needed(hdev, conn))
3579		goto unlock;
3580
3581	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3582		struct hci_cp_auth_requested cp;
3583
3584		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3585
3586		cp.handle = __cpu_to_le16(conn->handle);
3587		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3588	}
3589
3590unlock:
3591	hci_dev_unlock(hdev);
3592}
3593
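/* Encryption Change: update the AUTH/ENCRYPT/FIPS/AES-CCM connection
 * flags, force a fresh RPA after an LE encryption failure, re-check the
 * link security requirements and, for encrypted ACL links, read the
 * encryption key size before notifying the upper layers.
 */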
3594static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3595				   struct sk_buff *skb)
3596{
3597	struct hci_ev_encrypt_change *ev = data;
3598	struct hci_conn *conn;
3599
3600	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3601
3602	hci_dev_lock(hdev);
3603
3604	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3605	if (!conn)
3606		goto unlock;
3607
3608	if (!ev->status) {
3609		if (ev->encrypt) {
3610			/* Encryption implies authentication */
3611			set_bit(HCI_CONN_AUTH, &conn->flags);
3612			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3613			conn->sec_level = conn->pending_sec_level;
3614
3615			/* P-256 authentication key implies FIPS */
3616			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3617				set_bit(HCI_CONN_FIPS, &conn->flags);
3618
3619			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3620			    conn->type == LE_LINK)
3621				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3622		} else {
3623			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3624			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3625		}
3626	}
3627
3628	/* We should disregard the current RPA and generate a new one
3629	 * whenever the encryption procedure fails.
3630	 */
3631	if (ev->status && conn->type == LE_LINK) {
3632		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3633		hci_adv_instances_set_rpa_expired(hdev, true);
3634	}
3635
3636	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3637
3638	/* Check link security requirements are met */
3639	if (!hci_conn_check_link_mode(conn))
3640		ev->status = HCI_ERROR_AUTH_FAILURE;
3641
3642	if (ev->status && conn->state == BT_CONNECTED) {
3643		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3644			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3645
3646		/* Notify upper layers so they can cleanup before
3647		 * disconnecting.
3648		 */
3649		hci_encrypt_cfm(conn, ev->status);
3650		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3651		hci_conn_drop(conn);
3652		goto unlock;
3653	}
3654
3654
3655	/* Try reading the encryption key size for encrypted ACL links */
3656	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3657		struct hci_cp_read_enc_key_size cp;
3658
3659		/* Only send HCI_Read_Encryption_Key_Size if the
3660		 * controller really supports it. If it doesn't, assume
3661		 * the default size (16).
3662		 */
3663		if (!(hdev->commands[20] & 0x10)) {
3664			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3665			goto notify;
3666		}
3667
3668		cp.handle = cpu_to_le16(conn->handle);
3669		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3670				 sizeof(cp), &cp)) {
3671			bt_dev_err(hdev, "sending read key size failed");
3672			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3673			goto notify;
3674		}
3675
3676		goto unlock;
3677	}
3678
3679	/* Set the default Authenticated Payload Timeout after
3680	 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B
3681	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3682	 * sent when the link is active and encryption is enabled. The conn
3683	 * type can be either LE or ACL, the controller must support LMP
3684	 * Ping, and the link must also be using AES-CCM encryption.
3685	 */
3686	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3687	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3688	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3689	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3690		struct hci_cp_write_auth_payload_to cp;
3691
3692		cp.handle = cpu_to_le16(conn->handle);
3693		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3694		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3695				 sizeof(cp), &cp))
3696			bt_dev_err(hdev, "write auth payload timeout failed");
3697	}
3698
3699notify:
3700	hci_encrypt_cfm(conn, ev->status);
3701
3702unlock:
3703	hci_dev_unlock(hdev);
3704}
3705
3706static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3707					     struct sk_buff *skb)
3708{
3709	struct hci_ev_change_link_key_complete *ev = data;
3710	struct hci_conn *conn;
3711
3712	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3713
3714	hci_dev_lock(hdev);
3715
3716	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3717	if (conn) {
3718		if (!ev->status)
3719			set_bit(HCI_CONN_SECURE, &conn->flags);
3720
3721		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3722
3723		hci_key_change_cfm(conn, ev->status);
3724	}
3725
3726	hci_dev_unlock(hdev);
3727}
3728
3729static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3730				    struct sk_buff *skb)
3731{
3732	struct hci_ev_remote_features *ev = data;
3733	struct hci_conn *conn;
3734
3735	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3736
3737	hci_dev_lock(hdev);
3738
3739	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3740	if (!conn)
3741		goto unlock;
3742
3743	if (!ev->status)
3744		memcpy(conn->features[0], ev->features, 8);
3745
3746	if (conn->state != BT_CONFIG)
3747		goto unlock;
3748
3749	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3750	    lmp_ext_feat_capable(conn)) {
3751		struct hci_cp_read_remote_ext_features cp;
3752		cp.handle = ev->handle;
3753		cp.page = 0x01;
3754		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3755			     sizeof(cp), &cp);
3756		goto unlock;
3757	}
3758
3759	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3760		struct hci_cp_remote_name_req cp;
3761		memset(&cp, 0, sizeof(cp));
3762		bacpy(&cp.bdaddr, &conn->dst);
3763		cp.pscan_rep_mode = 0x02;
3764		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3765	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3766		mgmt_device_connected(hdev, conn, NULL, 0);
3767
3768	if (!hci_outgoing_auth_needed(hdev, conn)) {
3769		conn->state = BT_CONNECTED;
3770		hci_connect_cfm(conn, ev->status);
3771		hci_conn_drop(conn);
3772	}
3773
3774unlock:
3775	hci_dev_unlock(hdev);
3776}
3777
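/* Command Complete/Status events carry the controller's current
 * Num_HCI_Command_Packets value (ncmd). A non-zero value re-enables
 * command sending; zero arms the ncmd watchdog so an unresponsive
 * controller can be detected.
 */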
3778static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3779{
3780	cancel_delayed_work(&hdev->cmd_timer);
3781
3782	rcu_read_lock();
3783	if (!test_bit(HCI_RESET, &hdev->flags)) {
3784		if (ncmd) {
3785			cancel_delayed_work(&hdev->ncmd_timer);
3786			atomic_set(&hdev->cmd_cnt, 1);
3787		} else {
3788			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3789				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3790						   HCI_NCMD_TIMEOUT);
3791		}
3792	}
3793	rcu_read_unlock();
3794}
3795
3796static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3797					struct sk_buff *skb)
3798{
3799	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3800
3801	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3802
3803	if (rp->status)
3804		return rp->status;
3805
3806	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3807	hdev->le_pkts  = rp->acl_max_pkt;
3808	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3809	hdev->iso_pkts = rp->iso_max_pkt;
3810
3811	hdev->le_cnt  = hdev->le_pkts;
3812	hdev->iso_cnt = hdev->iso_pkts;
3813
3814	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3815	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3816
3817	return rp->status;
3818}
3819
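/* Fail every CIS in the given CIG that is bound to a peer but has not
 * been assigned a connection handle yet. Used when Set CIG Parameters
 * fails, so the already configured CIG state is left untouched.
 */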
3820static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3821{
3822	struct hci_conn *conn, *tmp;
3823
3824	lockdep_assert_held(&hdev->lock);
3825
3826	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3827		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3828		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3829			continue;
3830
3831		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3832			hci_conn_failed(conn, status);
3833	}
3834}
3835
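/* Set CIG Parameters complete: on error only the still-unbound CIS
 * connections are failed, as the spec guarantees the CIG state itself is
 * unchanged; on success the returned connection handles are bound to the
 * matching CIS in the order they were requested.
 */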
3836static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3837				   struct sk_buff *skb)
3838{
3839	struct hci_rp_le_set_cig_params *rp = data;
3840	struct hci_cp_le_set_cig_params *cp;
3841	struct hci_conn *conn;
3842	u8 status = rp->status;
3843	bool pending = false;
3844	int i;
3845
3846	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3847
3848	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3849	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3850			    rp->cig_id != cp->cig_id)) {
3851		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3852		status = HCI_ERROR_UNSPECIFIED;
3853	}
3854
3855	hci_dev_lock(hdev);
3856
3857	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3858	 *
3859	 * If the Status return parameter is non-zero, then the state of the CIG
3860	 * and its CIS configurations shall not be changed by the command. If
3861	 * the CIG did not already exist, it shall not be created.
3862	 */
3863	if (status) {
3864		/* Keep current configuration, fail only the unbound CIS */
3865		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3866		goto unlock;
3867	}
3868
3869	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3870	 *
3871	 * If the Status return parameter is zero, then the Controller shall
3872	 * set the Connection_Handle arrayed return parameter to the connection
3873	 * handle(s) corresponding to the CIS configurations specified in
3874	 * the CIS_IDs command parameter, in the same order.
3875	 */
3876	for (i = 0; i < rp->num_handles; ++i) {
3877		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3878						cp->cis[i].cis_id);
3879		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3880			continue;
3881
3882		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3883			continue;
3884
3885		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3886			continue;
3887
3888		if (conn->state == BT_CONNECT)
3889			pending = true;
3890	}
3891
3892unlock:
3893	if (pending)
3894		hci_le_create_cis_pending(hdev);
3895
3896	hci_dev_unlock(hdev);
3897
3898	return rp->status;
3899}
3900
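/* LE Setup ISO Data Path complete: confirm the ISO connection to the
 * upper layers once the relevant direction is configured, or delete it
 * if the controller reported an error.
 */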
3901static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3902				   struct sk_buff *skb)
3903{
3904	struct hci_rp_le_setup_iso_path *rp = data;
3905	struct hci_cp_le_setup_iso_path *cp;
3906	struct hci_conn *conn;
3907
3908	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3909
3910	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3911	if (!cp)
3912		return rp->status;
3913
3914	hci_dev_lock(hdev);
3915
3916	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3917	if (!conn)
3918		goto unlock;
3919
3920	if (rp->status) {
3921		hci_connect_cfm(conn, rp->status);
3922		hci_conn_del(conn);
3923		goto unlock;
3924	}
3925
3926	switch (cp->direction) {
3927	/* Input (Host to Controller) */
3928	case 0x00:
3929		/* Only confirm connection if output only */
3930		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3931			hci_connect_cfm(conn, rp->status);
3932		break;
3933	/* Output (Controller to Host) */
3934	case 0x01:
3935		/* Confirm connection since conn->iso_qos is always configured
3936		 * last.
3937		 */
3938		hci_connect_cfm(conn, rp->status);
3939		break;
3940	}
3941
3942unlock:
3943	hci_dev_unlock(hdev);
3944	return rp->status;
3945}
3946
3947static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3948{
3949	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3950}
3951
3952static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3953				   struct sk_buff *skb)
3954{
3955	struct hci_ev_status *rp = data;
3956	struct hci_cp_le_set_per_adv_params *cp;
3957
3958	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3959
3960	if (rp->status)
3961		return rp->status;
3962
3963	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3964	if (!cp)
3965		return rp->status;
3966
3967	/* TODO: set the conn state */
3968	return rp->status;
3969}
3970
3971static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3972				       struct sk_buff *skb)
3973{
3974	struct hci_ev_status *rp = data;
3975	struct hci_cp_le_set_per_adv_enable *cp;
3976	struct adv_info *adv = NULL, *n;
3977	u8 per_adv_cnt = 0;
3978
3979	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3980
3981	if (rp->status)
3982		return rp->status;
3983
3984	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3985	if (!cp)
3986		return rp->status;
3987
3988	hci_dev_lock(hdev);
3989
3990	adv = hci_find_adv_instance(hdev, cp->handle);
3991
3992	if (cp->enable) {
3993		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3994
3995		if (adv)
3996			adv->enabled = true;
3997	} else {
3998		/* If just one instance was disabled check if there are
3999		 * any other instance enabled before clearing HCI_LE_PER_ADV.
4000		 * The current periodic adv instance will be marked as
4001		 * disabled once extended advertising is also disabled.
4002		 */
4003		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
4004					 list) {
4005			if (adv->periodic && adv->enabled)
4006				per_adv_cnt++;
4007		}
4008
4009		if (per_adv_cnt > 1)
4010			goto unlock;
4011
4012		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4013	}
4014
4015unlock:
4016	hci_dev_unlock(hdev);
4017
4018	return rp->status;
4019}
4020
4021#define HCI_CC_VL(_op, _func, _min, _max) \
4022{ \
4023	.op = _op, \
4024	.func = _func, \
4025	.min_len = _min, \
4026	.max_len = _max, \
4027}
4028
4029#define HCI_CC(_op, _func, _len) \
4030	HCI_CC_VL(_op, _func, _len, _len)
4031
4032#define HCI_CC_STATUS(_op, _func) \
4033	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4034
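/* Dispatch table mapping each Command Complete opcode to its handler and
 * the acceptable response length window. As an example of how the macros
 * above expand, HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset) becomes:
 *
 *	{ .op = HCI_OP_RESET, .func = hci_cc_reset,
 *	  .min_len = sizeof(struct hci_ev_status),
 *	  .max_len = sizeof(struct hci_ev_status) }
 */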
4035static const struct hci_cc {
4036	u16  op;
4037	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4038	u16  min_len;
4039	u16  max_len;
4040} hci_cc_table[] = {
4041	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4042	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4043	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4044	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4045		      hci_cc_remote_name_req_cancel),
4046	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4047	       sizeof(struct hci_rp_role_discovery)),
4048	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4049	       sizeof(struct hci_rp_read_link_policy)),
4050	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4051	       sizeof(struct hci_rp_write_link_policy)),
4052	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4053	       sizeof(struct hci_rp_read_def_link_policy)),
4054	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4055		      hci_cc_write_def_link_policy),
4056	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4057	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4058	       sizeof(struct hci_rp_read_stored_link_key)),
4059	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4060	       sizeof(struct hci_rp_delete_stored_link_key)),
4061	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4062	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4063	       sizeof(struct hci_rp_read_local_name)),
4064	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4065	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4066	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4067	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4068	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4069	       sizeof(struct hci_rp_read_class_of_dev)),
4070	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4071	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4072	       sizeof(struct hci_rp_read_voice_setting)),
4073	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4074	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4075	       sizeof(struct hci_rp_read_num_supported_iac)),
4076	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4077	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4078	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4079	       sizeof(struct hci_rp_read_auth_payload_to)),
4080	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4081	       sizeof(struct hci_rp_write_auth_payload_to)),
4082	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4083	       sizeof(struct hci_rp_read_local_version)),
4084	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4085	       sizeof(struct hci_rp_read_local_commands)),
4086	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4087	       sizeof(struct hci_rp_read_local_features)),
4088	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4089	       sizeof(struct hci_rp_read_local_ext_features)),
4090	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4091	       sizeof(struct hci_rp_read_buffer_size)),
4092	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4093	       sizeof(struct hci_rp_read_bd_addr)),
4094	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4095	       sizeof(struct hci_rp_read_local_pairing_opts)),
4096	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4097	       sizeof(struct hci_rp_read_page_scan_activity)),
4098	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4099		      hci_cc_write_page_scan_activity),
4100	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4101	       sizeof(struct hci_rp_read_page_scan_type)),
4102	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4103	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4104	       sizeof(struct hci_rp_read_data_block_size)),
4105	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4106	       sizeof(struct hci_rp_read_flow_control_mode)),
4107	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4108	       sizeof(struct hci_rp_read_local_amp_info)),
4109	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4110	       sizeof(struct hci_rp_read_clock)),
4111	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4112	       sizeof(struct hci_rp_read_enc_key_size)),
4113	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4114	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4115	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4116	       hci_cc_read_def_err_data_reporting,
4117	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4118	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4119		      hci_cc_write_def_err_data_reporting),
4120	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4121	       sizeof(struct hci_rp_pin_code_reply)),
4122	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4123	       sizeof(struct hci_rp_pin_code_neg_reply)),
4124	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4125	       sizeof(struct hci_rp_read_local_oob_data)),
4126	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4127	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4128	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4129	       sizeof(struct hci_rp_le_read_buffer_size)),
4130	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4131	       sizeof(struct hci_rp_le_read_local_features)),
4132	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4133	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4134	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4135	       sizeof(struct hci_rp_user_confirm_reply)),
4136	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4137	       sizeof(struct hci_rp_user_confirm_reply)),
4138	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4139	       sizeof(struct hci_rp_user_confirm_reply)),
4140	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4141	       sizeof(struct hci_rp_user_confirm_reply)),
4142	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4143	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4144	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4145	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4146	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4147	       hci_cc_le_read_accept_list_size,
4148	       sizeof(struct hci_rp_le_read_accept_list_size)),
4149	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4150	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4151		      hci_cc_le_add_to_accept_list),
4152	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4153		      hci_cc_le_del_from_accept_list),
4154	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4155	       sizeof(struct hci_rp_le_read_supported_states)),
4156	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4157	       sizeof(struct hci_rp_le_read_def_data_len)),
4158	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4159		      hci_cc_le_write_def_data_len),
4160	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4161		      hci_cc_le_add_to_resolv_list),
4162	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4163		      hci_cc_le_del_from_resolv_list),
4164	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4165		      hci_cc_le_clear_resolv_list),
4166	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4167	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4168	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4169		      hci_cc_le_set_addr_resolution_enable),
4170	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4171	       sizeof(struct hci_rp_le_read_max_data_len)),
4172	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4173		      hci_cc_write_le_host_supported),
4174	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4175	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4176	       sizeof(struct hci_rp_read_rssi)),
4177	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4178	       sizeof(struct hci_rp_read_tx_power)),
4179	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4180	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4181		      hci_cc_le_set_ext_scan_param),
4182	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4183		      hci_cc_le_set_ext_scan_enable),
4184	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4185	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4186	       hci_cc_le_read_num_adv_sets,
4187	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4188	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4189	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4190	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4191		      hci_cc_le_set_ext_adv_enable),
4192	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4193		      hci_cc_le_set_adv_set_random_addr),
4194	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4195	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4196	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4197	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4198		      hci_cc_le_set_per_adv_enable),
4199	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4200	       sizeof(struct hci_rp_le_read_transmit_power)),
4201	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4202	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4203	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4204	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4205		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4206	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4207	       sizeof(struct hci_rp_le_setup_iso_path)),
4208};
4209
4210static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4211		      struct sk_buff *skb)
4212{
4213	void *data;
4214
4215	if (skb->len < cc->min_len) {
4216		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4217			   cc->op, skb->len, cc->min_len);
4218		return HCI_ERROR_UNSPECIFIED;
4219	}
4220
4221	/* Just warn if the length is over max_len; it may still be possible
4222	 * to partially parse the cc, so leave it to the callback to decide
4223	 * whether that is acceptable.
4224	 */
4225	if (skb->len > cc->max_len)
4226		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4227			    cc->op, skb->len, cc->max_len);
4228
4229	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4230	if (!data)
4231		return HCI_ERROR_UNSPECIFIED;
4232
4233	return cc->func(hdev, data, skb);
4234}
4235
4236static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4237				 struct sk_buff *skb, u16 *opcode, u8 *status,
4238				 hci_req_complete_t *req_complete,
4239				 hci_req_complete_skb_t *req_complete_skb)
4240{
4241	struct hci_ev_cmd_complete *ev = data;
4242	int i;
4243
4244	*opcode = __le16_to_cpu(ev->opcode);
4245
4246	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4247
4248	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4249		if (hci_cc_table[i].op == *opcode) {
4250			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4251			break;
4252		}
4253	}
4254
4255	if (i == ARRAY_SIZE(hci_cc_table)) {
4256		/* Unknown opcode, assume byte 0 contains the status, so
4257		 * that e.g. __hci_cmd_sync() properly returns errors
4258		 * for vendor specific commands sent by HCI drivers.
4259		 * If a vendor doesn't actually follow this convention we may
4260		 * need to introduce a vendor CC table in order to properly set
4261		 * the status.
4262		 */
4263		*status = skb->data[0];
4264	}
4265
4266	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4267
4268	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4269			     req_complete_skb);
4270
4271	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4272		bt_dev_err(hdev,
4273			   "unexpected event for opcode 0x%4.4x", *opcode);
4274		return;
4275	}
4276
4277	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4278		queue_work(hdev->workqueue, &hdev->cmd_work);
4279}
4280
4281static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4282{
4283	struct hci_cp_le_create_cis *cp;
4284	bool pending = false;
4285	int i;
4286
4287	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4288
4289	if (!status)
4290		return;
4291
4292	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4293	if (!cp)
4294		return;
4295
4296	hci_dev_lock(hdev);
4297
4298	/* Remove connection if command failed */
4299	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4300		struct hci_conn *conn;
4301		u16 handle;
4302
4303		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4304
4305		conn = hci_conn_hash_lookup_handle(hdev, handle);
4306		if (conn) {
4307			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4308					       &conn->flags))
4309				pending = true;
4310			conn->state = BT_CLOSED;
4311			hci_connect_cfm(conn, status);
4312			hci_conn_del(conn);
4313		}
4314	}
4315
4316	if (pending)
4317		hci_le_create_cis_pending(hdev);
4318
4319	hci_dev_unlock(hdev);
4320}
4321
4322#define HCI_CS(_op, _func) \
4323{ \
4324	.op = _op, \
4325	.func = _func, \
4326}
4327
4328static const struct hci_cs {
4329	u16  op;
4330	void (*func)(struct hci_dev *hdev, __u8 status);
4331} hci_cs_table[] = {
4332	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4333	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4334	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4335	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4336	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4337	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4338	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4339	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4340	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4341	       hci_cs_read_remote_ext_features),
4342	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4343	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4344	       hci_cs_enhanced_setup_sync_conn),
4345	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4346	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4347	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4348	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4349	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4350	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4351	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4352	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4353	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4354};
4355
4356static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4357			       struct sk_buff *skb, u16 *opcode, u8 *status,
4358			       hci_req_complete_t *req_complete,
4359			       hci_req_complete_skb_t *req_complete_skb)
4360{
4361	struct hci_ev_cmd_status *ev = data;
4362	int i;
4363
4364	*opcode = __le16_to_cpu(ev->opcode);
4365	*status = ev->status;
4366
4367	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4368
4369	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4370		if (hci_cs_table[i].op == *opcode) {
4371			hci_cs_table[i].func(hdev, ev->status);
4372			break;
4373		}
4374	}
4375
4376	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4377
4378	/* Indicate request completion if the command failed. Also, if
4379	 * we're not waiting for a special event and we get a success
4380	 * command status we should try to flag the request as completed
4381	 * (since for this kind of commands there will not be a command
4382	 * complete event).
4383	 */
4384	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4385		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4386				     req_complete_skb);
4387		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4388			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4389				   *opcode);
4390			return;
4391		}
4392	}
4393
4394	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4395		queue_work(hdev->workqueue, &hdev->cmd_work);
4396}
4397
4398static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4399				   struct sk_buff *skb)
4400{
4401	struct hci_ev_hardware_error *ev = data;
4402
4403	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4404
4405	hdev->hw_error_code = ev->code;
4406
4407	queue_work(hdev->req_workqueue, &hdev->error_reset);
4408}
4409
4410static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4411				struct sk_buff *skb)
4412{
4413	struct hci_ev_role_change *ev = data;
4414	struct hci_conn *conn;
4415
4416	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4417
4418	hci_dev_lock(hdev);
4419
4420	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4421	if (conn) {
4422		if (!ev->status)
4423			conn->role = ev->role;
4424
4425		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4426
4427		hci_role_switch_cfm(conn, ev->status, ev->role);
4428	}
4429
4430	hci_dev_unlock(hdev);
4431}
4432
4433static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4434				  struct sk_buff *skb)
4435{
4436	struct hci_ev_num_comp_pkts *ev = data;
4437	int i;
4438
4439	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4440			     flex_array_size(ev, handles, ev->num)))
4441		return;
4442
4443	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4444		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4445		return;
4446	}
4447
4448	bt_dev_dbg(hdev, "num %d", ev->num);
4449
4450	for (i = 0; i < ev->num; i++) {
4451		struct hci_comp_pkts_info *info = &ev->handles[i];
4452		struct hci_conn *conn;
4453		__u16  handle, count;
4454
4455		handle = __le16_to_cpu(info->handle);
4456		count  = __le16_to_cpu(info->count);
4457
4458		conn = hci_conn_hash_lookup_handle(hdev, handle);
4459		if (!conn)
4460			continue;
4461
4462		conn->sent -= count;
4463
4464		switch (conn->type) {
4465		case ACL_LINK:
4466			hdev->acl_cnt += count;
4467			if (hdev->acl_cnt > hdev->acl_pkts)
4468				hdev->acl_cnt = hdev->acl_pkts;
4469			break;
4470
4471		case LE_LINK:
4472			if (hdev->le_pkts) {
4473				hdev->le_cnt += count;
4474				if (hdev->le_cnt > hdev->le_pkts)
4475					hdev->le_cnt = hdev->le_pkts;
4476			} else {
4477				hdev->acl_cnt += count;
4478				if (hdev->acl_cnt > hdev->acl_pkts)
4479					hdev->acl_cnt = hdev->acl_pkts;
4480			}
4481			break;
4482
4483		case SCO_LINK:
4484			hdev->sco_cnt += count;
4485			if (hdev->sco_cnt > hdev->sco_pkts)
4486				hdev->sco_cnt = hdev->sco_pkts;
4487			break;
4488
4489		case ISO_LINK:
4490			if (hdev->iso_pkts) {
4491				hdev->iso_cnt += count;
4492				if (hdev->iso_cnt > hdev->iso_pkts)
4493					hdev->iso_cnt = hdev->iso_pkts;
4494			} else if (hdev->le_pkts) {
4495				hdev->le_cnt += count;
4496				if (hdev->le_cnt > hdev->le_pkts)
4497					hdev->le_cnt = hdev->le_pkts;
4498			} else {
4499				hdev->acl_cnt += count;
4500				if (hdev->acl_cnt > hdev->acl_pkts)
4501					hdev->acl_cnt = hdev->acl_pkts;
4502			}
4503			break;
4504
4505		default:
4506			bt_dev_err(hdev, "unknown type %d conn %p",
4507				   conn->type, conn);
4508			break;
4509		}
4510	}
4511
4512	queue_work(hdev->workqueue, &hdev->tx_work);
4513}
4514
4515static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4516						 __u16 handle)
4517{
4518	struct hci_chan *chan;
4519
4520	switch (hdev->dev_type) {
4521	case HCI_PRIMARY:
4522		return hci_conn_hash_lookup_handle(hdev, handle);
4523	case HCI_AMP:
4524		chan = hci_chan_lookup_handle(hdev, handle);
4525		if (chan)
4526			return chan->conn;
4527		break;
4528	default:
4529		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4530		break;
4531	}
4532
4533	return NULL;
4534}
4535
4536static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4537				    struct sk_buff *skb)
4538{
4539	struct hci_ev_num_comp_blocks *ev = data;
4540	int i;
4541
4542	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4543			     flex_array_size(ev, handles, ev->num_hndl)))
4544		return;
4545
4546	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4547		bt_dev_err(hdev, "wrong event for mode %d",
4548			   hdev->flow_ctl_mode);
4549		return;
4550	}
4551
4552	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4553		   ev->num_hndl);
4554
4555	for (i = 0; i < ev->num_hndl; i++) {
4556		struct hci_comp_blocks_info *info = &ev->handles[i];
4557		struct hci_conn *conn = NULL;
4558		__u16  handle, block_count;
4559
4560		handle = __le16_to_cpu(info->handle);
4561		block_count = __le16_to_cpu(info->blocks);
4562
4563		conn = __hci_conn_lookup_handle(hdev, handle);
4564		if (!conn)
4565			continue;
4566
4567		conn->sent -= block_count;
4568
4569		switch (conn->type) {
4570		case ACL_LINK:
4571		case AMP_LINK:
4572			hdev->block_cnt += block_count;
4573			if (hdev->block_cnt > hdev->num_blocks)
4574				hdev->block_cnt = hdev->num_blocks;
4575			break;
4576
4577		default:
4578			bt_dev_err(hdev, "unknown type %d conn %p",
4579				   conn->type, conn);
4580			break;
4581		}
4582	}
4583
4584	queue_work(hdev->workqueue, &hdev->tx_work);
4585}
4586
4587static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4588				struct sk_buff *skb)
4589{
4590	struct hci_ev_mode_change *ev = data;
4591	struct hci_conn *conn;
4592
4593	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4594
4595	hci_dev_lock(hdev);
4596
4597	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4598	if (conn) {
4599		conn->mode = ev->mode;
4600
4601		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4602					&conn->flags)) {
4603			if (conn->mode == HCI_CM_ACTIVE)
4604				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4605			else
4606				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4607		}
4608
4609		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4610			hci_sco_setup(conn, ev->status);
4611	}
4612
4613	hci_dev_unlock(hdev);
4614}
4615
4616static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4617				     struct sk_buff *skb)
4618{
4619	struct hci_ev_pin_code_req *ev = data;
4620	struct hci_conn *conn;
4621
4622	bt_dev_dbg(hdev, "");
4623
4624	hci_dev_lock(hdev);
4625
4626	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4627	if (!conn)
4628		goto unlock;
4629
4630	if (conn->state == BT_CONNECTED) {
4631		hci_conn_hold(conn);
4632		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4633		hci_conn_drop(conn);
4634	}
4635
4636	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4637	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4638		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4639			     sizeof(ev->bdaddr), &ev->bdaddr);
4640	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4641		u8 secure;
4642
4643		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4644			secure = 1;
4645		else
4646			secure = 0;
4647
4648		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4649	}
4650
4651unlock:
4652	hci_dev_unlock(hdev);
4653}
4654
4655static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4656{
4657	if (key_type == HCI_LK_CHANGED_COMBINATION)
4658		return;
4659
4660	conn->pin_length = pin_len;
4661	conn->key_type = key_type;
4662
4663	switch (key_type) {
4664	case HCI_LK_LOCAL_UNIT:
4665	case HCI_LK_REMOTE_UNIT:
4666	case HCI_LK_DEBUG_COMBINATION:
4667		return;
4668	case HCI_LK_COMBINATION:
4669		if (pin_len == 16)
4670			conn->pending_sec_level = BT_SECURITY_HIGH;
4671		else
4672			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4673		break;
4674	case HCI_LK_UNAUTH_COMBINATION_P192:
4675	case HCI_LK_UNAUTH_COMBINATION_P256:
4676		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4677		break;
4678	case HCI_LK_AUTH_COMBINATION_P192:
4679		conn->pending_sec_level = BT_SECURITY_HIGH;
4680		break;
4681	case HCI_LK_AUTH_COMBINATION_P256:
4682		conn->pending_sec_level = BT_SECURITY_FIPS;
4683		break;
4684	}
4685}
4686
4687static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4688				     struct sk_buff *skb)
4689{
4690	struct hci_ev_link_key_req *ev = data;
4691	struct hci_cp_link_key_reply cp;
4692	struct hci_conn *conn;
4693	struct link_key *key;
4694
4695	bt_dev_dbg(hdev, "");
4696
4697	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4698		return;
4699
4700	hci_dev_lock(hdev);
4701
4702	key = hci_find_link_key(hdev, &ev->bdaddr);
4703	if (!key) {
4704		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4705		goto not_found;
4706	}
4707
4708	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4709
4710	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4711	if (conn) {
4712		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4713
4714		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4715		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4716		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4717			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4718			goto not_found;
4719		}
4720
4721		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4722		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4723		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4724			bt_dev_dbg(hdev, "ignoring unauthenticated key for high security");
4725			goto not_found;
4726		}
4727
4728		conn_set_key(conn, key->type, key->pin_len);
4729	}
4730
4731	bacpy(&cp.bdaddr, &ev->bdaddr);
4732	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4733
4734	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4735
4736	hci_dev_unlock(hdev);
4737
4738	return;
4739
4740not_found:
4741	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4742	hci_dev_unlock(hdev);
4743}
4744
4745static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4746				    struct sk_buff *skb)
4747{
4748	struct hci_ev_link_key_notify *ev = data;
4749	struct hci_conn *conn;
4750	struct link_key *key;
4751	bool persistent;
4752	u8 pin_len = 0;
4753
4754	bt_dev_dbg(hdev, "");
4755
4756	hci_dev_lock(hdev);
4757
4758	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4759	if (!conn)
4760		goto unlock;
4761
4762	/* Ignore NULL link keys to guard against CVE-2020-26555 */
4763	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4764		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4765			   &ev->bdaddr);
4766		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4767		hci_conn_drop(conn);
4768		goto unlock;
4769	}
4770
4771	hci_conn_hold(conn);
4772	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4773	hci_conn_drop(conn);
4774
4775	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4776	conn_set_key(conn, ev->key_type, conn->pin_length);
4777
4778	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4779		goto unlock;
4780
4781	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4782			        ev->key_type, pin_len, &persistent);
4783	if (!key)
4784		goto unlock;
4785
4786	/* Update connection information since adding the key will have
4787	 * fixed up the type in the case of changed combination keys.
4788	 */
4789	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4790		conn_set_key(conn, key->type, key->pin_len);
4791
4792	mgmt_new_link_key(hdev, key, persistent);
4793
4794	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4795	 * is set. If it's not set, simply remove the key from the kernel
4796	 * list (we've still notified user space about it but with
4797	 * store_hint being 0).
4798	 */
4799	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4800	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4801		list_del_rcu(&key->list);
4802		kfree_rcu(key, rcu);
4803		goto unlock;
4804	}
4805
4806	if (persistent)
4807		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4808	else
4809		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4810
4811unlock:
4812	hci_dev_unlock(hdev);
4813}
4814
4815static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4816				 struct sk_buff *skb)
4817{
4818	struct hci_ev_clock_offset *ev = data;
4819	struct hci_conn *conn;
4820
4821	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4822
4823	hci_dev_lock(hdev);
4824
4825	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4826	if (conn && !ev->status) {
4827		struct inquiry_entry *ie;
4828
4829		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4830		if (ie) {
4831			ie->data.clock_offset = ev->clock_offset;
4832			ie->timestamp = jiffies;
4833		}
4834	}
4835
4836	hci_dev_unlock(hdev);
4837}
4838
4839static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4840				    struct sk_buff *skb)
4841{
4842	struct hci_ev_pkt_type_change *ev = data;
4843	struct hci_conn *conn;
4844
4845	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4846
4847	hci_dev_lock(hdev);
4848
4849	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4850	if (conn && !ev->status)
4851		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4852
4853	hci_dev_unlock(hdev);
4854}
4855
4856static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4857				   struct sk_buff *skb)
4858{
4859	struct hci_ev_pscan_rep_mode *ev = data;
4860	struct inquiry_entry *ie;
4861
4862	bt_dev_dbg(hdev, "");
4863
4864	hci_dev_lock(hdev);
4865
4866	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4867	if (ie) {
4868		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4869		ie->timestamp = jiffies;
4870	}
4871
4872	hci_dev_unlock(hdev);
4873}
4874
4875static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4876					     struct sk_buff *skb)
4877{
4878	struct hci_ev_inquiry_result_rssi *ev = edata;
4879	struct inquiry_data data;
4880	int i;
4881
4882	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4883
4884	if (!ev->num)
4885		return;
4886
4887	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4888		return;
4889
4890	hci_dev_lock(hdev);
4891
4892	if (skb->len == array_size(ev->num,
4893				   sizeof(struct inquiry_info_rssi_pscan))) {
4894		struct inquiry_info_rssi_pscan *info;
4895
4896		for (i = 0; i < ev->num; i++) {
4897			u32 flags;
4898
4899			info = hci_ev_skb_pull(hdev, skb,
4900					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4901					       sizeof(*info));
4902			if (!info) {
4903				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4904					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4905				goto unlock;
4906			}
4907
4908			bacpy(&data.bdaddr, &info->bdaddr);
4909			data.pscan_rep_mode	= info->pscan_rep_mode;
4910			data.pscan_period_mode	= info->pscan_period_mode;
4911			data.pscan_mode		= info->pscan_mode;
4912			memcpy(data.dev_class, info->dev_class, 3);
4913			data.clock_offset	= info->clock_offset;
4914			data.rssi		= info->rssi;
4915			data.ssp_mode		= 0x00;
4916
4917			flags = hci_inquiry_cache_update(hdev, &data, false);
4918
4919			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4920					  info->dev_class, info->rssi,
4921					  flags, NULL, 0, NULL, 0, 0);
4922		}
4923	} else if (skb->len == array_size(ev->num,
4924					  sizeof(struct inquiry_info_rssi))) {
4925		struct inquiry_info_rssi *info;
4926
4927		for (i = 0; i < ev->num; i++) {
4928			u32 flags;
4929
4930			info = hci_ev_skb_pull(hdev, skb,
4931					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4932					       sizeof(*info));
4933			if (!info) {
4934				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4935					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4936				goto unlock;
4937			}
4938
4939			bacpy(&data.bdaddr, &info->bdaddr);
4940			data.pscan_rep_mode	= info->pscan_rep_mode;
4941			data.pscan_period_mode	= info->pscan_period_mode;
4942			data.pscan_mode		= 0x00;
4943			memcpy(data.dev_class, info->dev_class, 3);
4944			data.clock_offset	= info->clock_offset;
4945			data.rssi		= info->rssi;
4946			data.ssp_mode		= 0x00;
4947
4948			flags = hci_inquiry_cache_update(hdev, &data, false);
4949
4950			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4951					  info->dev_class, info->rssi,
4952					  flags, NULL, 0, NULL, 0, 0);
4953		}
4954	} else {
4955		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4956			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4957	}
4958unlock:
4959	hci_dev_unlock(hdev);
4960}
4961
4962static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4963					struct sk_buff *skb)
4964{
4965	struct hci_ev_remote_ext_features *ev = data;
4966	struct hci_conn *conn;
4967
4968	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4969
4970	hci_dev_lock(hdev);
4971
4972	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4973	if (!conn)
4974		goto unlock;
4975
4976	if (ev->page < HCI_MAX_PAGES)
4977		memcpy(conn->features[ev->page], ev->features, 8);
4978
4979	if (!ev->status && ev->page == 0x01) {
4980		struct inquiry_entry *ie;
4981
4982		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4983		if (ie)
4984			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4985
4986		if (ev->features[0] & LMP_HOST_SSP) {
4987			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4988		} else {
4989			/* It is mandatory by the Bluetooth specification that
4990			 * Extended Inquiry Results are only used when Secure
4991			 * Simple Pairing is enabled, but some devices violate
4992			 * this.
4993			 *
4994			 * To make these devices work, the internal SSP
4995			 * enabled flag needs to be cleared if the remote host
4996			 * features do not indicate SSP support */
4997			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4998		}
4999
5000		if (ev->features[0] & LMP_HOST_SC)
5001			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5002	}
5003
5004	if (conn->state != BT_CONFIG)
5005		goto unlock;
5006
5007	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5008		struct hci_cp_remote_name_req cp;
5009		memset(&cp, 0, sizeof(cp));
5010		bacpy(&cp.bdaddr, &conn->dst);
5011		cp.pscan_rep_mode = 0x02;
5012		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5013	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5014		mgmt_device_connected(hdev, conn, NULL, 0);
5015
5016	if (!hci_outgoing_auth_needed(hdev, conn)) {
5017		conn->state = BT_CONNECTED;
5018		hci_connect_cfm(conn, ev->status);
5019		hci_conn_drop(conn);
5020	}
5021
5022unlock:
5023	hci_dev_unlock(hdev);
5024}
5025
5026static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5027				       struct sk_buff *skb)
5028{
5029	struct hci_ev_sync_conn_complete *ev = data;
5030	struct hci_conn *conn;
5031	u8 status = ev->status;
5032
5033	switch (ev->link_type) {
5034	case SCO_LINK:
5035	case ESCO_LINK:
5036		break;
5037	default:
5038		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5039		 * for HCI_Synchronous_Connection_Complete is limited to
5040		 * either SCO or eSCO
5041		 */
5042		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5043		return;
5044	}
5045
5046	bt_dev_dbg(hdev, "status 0x%2.2x", status);
5047
5048	hci_dev_lock(hdev);
5049
5050	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5051	if (!conn) {
5052		if (ev->link_type == ESCO_LINK)
5053			goto unlock;
5054
5055		/* When the link type in the event indicates SCO connection
5056		 * and lookup of the connection object fails, then check
5057		 * if an eSCO connection object exists.
5058		 *
5059		 * The core limits the synchronous connections to either
5060		 * SCO or eSCO. The eSCO connection is preferred and is
5061		 * attempted first; until it is successfully established,
5062		 * the link type will be hinted as eSCO.
5063		 */
5064		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5065		if (!conn)
5066			goto unlock;
5067	}
5068
5069	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5070	 * Processing it more than once per connection can corrupt kernel memory.
5071	 *
5072	 * As the connection handle is set here for the first time, it indicates
5073	 * whether the connection is already set up.
5074	 */
5075	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5076		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5077		goto unlock;
5078	}
5079
5080	switch (status) {
5081	case 0x00:
5082		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5083		if (status) {
5084			conn->state = BT_CLOSED;
5085			break;
5086		}
5087
5088		conn->state  = BT_CONNECTED;
5089		conn->type   = ev->link_type;
5090
5091		hci_debugfs_create_conn(conn);
5092		hci_conn_add_sysfs(conn);
5093		break;
5094
5095	case 0x10:	/* Connection Accept Timeout */
5096	case 0x0d:	/* Connection Rejected due to Limited Resources */
5097	case 0x11:	/* Unsupported Feature or Parameter Value */
5098	case 0x1c:	/* SCO interval rejected */
5099	case 0x1a:	/* Unsupported Remote Feature */
5100	case 0x1e:	/* Invalid LMP Parameters */
5101	case 0x1f:	/* Unspecified error */
5102	case 0x20:	/* Unsupported LMP Parameter value */
5103		if (conn->out) {
5104			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5105					(hdev->esco_type & EDR_ESCO_MASK);
5106			if (hci_setup_sync(conn, conn->parent->handle))
5107				goto unlock;
5108		}
5109		fallthrough;
5110
5111	default:
5112		conn->state = BT_CLOSED;
5113		break;
5114	}
5115
5116	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5117	/* Notify only for SCO over the HCI transport data path, which is
5118	 * indicated by zero; non-zero means a non-HCI transport data path.
5119	 */
5120	if (conn->codec.data_path == 0 && hdev->notify) {
5121		switch (ev->air_mode) {
5122		case 0x02:
5123			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5124			break;
5125		case 0x03:
5126			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5127			break;
5128		}
5129	}
5130
5131	hci_connect_cfm(conn, status);
5132	if (status)
5133		hci_conn_del(conn);
5134
5135unlock:
5136	hci_dev_unlock(hdev);
5137}
5138
5139static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5140{
5141	size_t parsed = 0;
5142
5143	while (parsed < eir_len) {
5144		u8 field_len = eir[0];
5145
5146		if (field_len == 0)
5147			return parsed;
5148
5149		parsed += field_len + 1;
5150		eir += field_len + 1;
5151	}
5152
5153	return eir_len;
5154}
5155
5156static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5157					    struct sk_buff *skb)
5158{
5159	struct hci_ev_ext_inquiry_result *ev = edata;
5160	struct inquiry_data data;
5161	size_t eir_len;
5162	int i;
5163
5164	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5165			     flex_array_size(ev, info, ev->num)))
5166		return;
5167
5168	bt_dev_dbg(hdev, "num %d", ev->num);
5169
5170	if (!ev->num)
5171		return;
5172
5173	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5174		return;
5175
5176	hci_dev_lock(hdev);
5177
5178	for (i = 0; i < ev->num; i++) {
5179		struct extended_inquiry_info *info = &ev->info[i];
5180		u32 flags;
5181		bool name_known;
5182
5183		bacpy(&data.bdaddr, &info->bdaddr);
5184		data.pscan_rep_mode	= info->pscan_rep_mode;
5185		data.pscan_period_mode	= info->pscan_period_mode;
5186		data.pscan_mode		= 0x00;
5187		memcpy(data.dev_class, info->dev_class, 3);
5188		data.clock_offset	= info->clock_offset;
5189		data.rssi		= info->rssi;
5190		data.ssp_mode		= 0x01;
5191
5192		if (hci_dev_test_flag(hdev, HCI_MGMT))
5193			name_known = eir_get_data(info->data,
5194						  sizeof(info->data),
5195						  EIR_NAME_COMPLETE, NULL);
5196		else
5197			name_known = true;
5198
5199		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5200
5201		eir_len = eir_get_length(info->data, sizeof(info->data));
5202
5203		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5204				  info->dev_class, info->rssi,
5205				  flags, info->data, eir_len, NULL, 0, 0);
5206	}
5207
5208	hci_dev_unlock(hdev);
5209}
5210
5211static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5212					 struct sk_buff *skb)
5213{
5214	struct hci_ev_key_refresh_complete *ev = data;
5215	struct hci_conn *conn;
5216
5217	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5218		   __le16_to_cpu(ev->handle));
5219
5220	hci_dev_lock(hdev);
5221
5222	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5223	if (!conn)
5224		goto unlock;
5225
5226	/* For BR/EDR the necessary steps are taken through the
5227	 * auth_complete event.
5228	 */
5229	if (conn->type != LE_LINK)
5230		goto unlock;
5231
5232	if (!ev->status)
5233		conn->sec_level = conn->pending_sec_level;
5234
5235	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5236
5237	if (ev->status && conn->state == BT_CONNECTED) {
5238		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5239		hci_conn_drop(conn);
5240		goto unlock;
5241	}
5242
5243	if (conn->state == BT_CONFIG) {
5244		if (!ev->status)
5245			conn->state = BT_CONNECTED;
5246
5247		hci_connect_cfm(conn, ev->status);
5248		hci_conn_drop(conn);
5249	} else {
5250		hci_auth_cfm(conn, ev->status);
5251
5252		hci_conn_hold(conn);
5253		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5254		hci_conn_drop(conn);
5255	}
5256
5257unlock:
5258	hci_dev_unlock(hdev);
5259}
5260
5261static u8 hci_get_auth_req(struct hci_conn *conn)
5262{
5263	/* If remote requests no-bonding, follow that lead */
5264	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5265	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5266		return conn->remote_auth | (conn->auth_type & 0x01);
5267
5268	/* If both remote and local have enough IO capabilities, require
5269	 * MITM protection
5270	 */
5271	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5272	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5273		return conn->remote_auth | 0x01;
5274
5275	/* No MITM protection possible so ignore remote requirement */
5276	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5277}
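
/* Bit 0 of the authentication requirement is the MITM flag.  As a
 * worked example, a local requirement of HCI_AT_GENERAL_BONDING_MITM
 * (0x05) combined with a remote HCI_AT_NO_BONDING (0x00) takes the
 * first branch and yields 0x00 | (0x05 & 0x01) = 0x01, i.e.
 * HCI_AT_NO_BONDING_MITM: bonding follows the remote's lead while the
 * local MITM requirement is preserved.
 */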
5278
5279static u8 bredr_oob_data_present(struct hci_conn *conn)
5280{
5281	struct hci_dev *hdev = conn->hdev;
5282	struct oob_data *data;
5283
5284	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5285	if (!data)
5286		return 0x00;
5287
5288	if (bredr_sc_enabled(hdev)) {
5289		/* When Secure Connections is enabled, then just
5290		 * return the present value stored with the OOB
5291		 * data. The stored value contains the correct present
5292		 * information. However, it can only be trusted when
5293		 * not in Secure Connections Only mode.
5294		 */
5295		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5296			return data->present;
5297
5298		/* When Secure Connections Only mode is enabled, then
5299		 * the P-256 values are required. If they are not
5300		 * available, then do not declare that OOB data is
5301		 * present.
5302		 */
5303		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5304		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5305			return 0x00;
5306
5307		return 0x02;
5308	}
5309
5310	/* When Secure Connections is not enabled or actually
5311	 * not supported by the hardware, then check whether the
5312	 * P-192 data values are present.
5313	 */
5314	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5315	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5316		return 0x00;
5317
5318	return 0x01;
5319}
5320
5321static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5322				    struct sk_buff *skb)
5323{
5324	struct hci_ev_io_capa_request *ev = data;
5325	struct hci_conn *conn;
5326
5327	bt_dev_dbg(hdev, "");
5328
5329	hci_dev_lock(hdev);
5330
5331	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5332	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5333		goto unlock;
5334
5335	/* Assume remote supports SSP since it has triggered this event */
5336	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5337
5338	hci_conn_hold(conn);
5339
5340	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5341		goto unlock;
5342
5343	/* Allow pairing if we're bondable, if we're the initiators of
5344	 * the pairing, or if the remote is not requesting bonding.
5345	 */
5346	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5347	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5348	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5349		struct hci_cp_io_capability_reply cp;
5350
5351		bacpy(&cp.bdaddr, &ev->bdaddr);
5352		/* Change the IO capability from KeyboardDisplay to
5353		 * DisplayYesNo, as the former is not supported by the BT spec. */
5354		cp.capability = (conn->io_capability == 0x04) ?
5355				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5356
5357		/* If we are initiators, there is no remote information yet */
5358		if (conn->remote_auth == 0xff) {
5359			/* Request MITM protection if our IO caps allow it
5360			 * except for the no-bonding case.
5361			 */
5362			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5363			    conn->auth_type != HCI_AT_NO_BONDING)
5364				conn->auth_type |= 0x01;
5365		} else {
5366			conn->auth_type = hci_get_auth_req(conn);
5367		}
5368
5369		/* If we're not bondable, force one of the non-bondable
5370		 * authentication requirement values.
5371		 */
5372		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5373			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5374
5375		cp.authentication = conn->auth_type;
5376		cp.oob_data = bredr_oob_data_present(conn);
5377
5378		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5379			     sizeof(cp), &cp);
5380	} else {
5381		struct hci_cp_io_capability_neg_reply cp;
5382
5383		bacpy(&cp.bdaddr, &ev->bdaddr);
5384		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5385
5386		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5387			     sizeof(cp), &cp);
5388	}
5389
5390unlock:
5391	hci_dev_unlock(hdev);
5392}
5393
5394static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5395				  struct sk_buff *skb)
5396{
5397	struct hci_ev_io_capa_reply *ev = data;
5398	struct hci_conn *conn;
5399
5400	bt_dev_dbg(hdev, "");
5401
5402	hci_dev_lock(hdev);
5403
5404	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5405	if (!conn)
5406		goto unlock;
5407
5408	conn->remote_cap = ev->capability;
5409	conn->remote_auth = ev->authentication;
5410
5411unlock:
5412	hci_dev_unlock(hdev);
5413}
5414
5415static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5416					 struct sk_buff *skb)
5417{
5418	struct hci_ev_user_confirm_req *ev = data;
5419	int loc_mitm, rem_mitm, confirm_hint = 0;
5420	struct hci_conn *conn;
5421
5422	bt_dev_dbg(hdev, "");
5423
5424	hci_dev_lock(hdev);
5425
5426	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5427		goto unlock;
5428
5429	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5430	if (!conn)
5431		goto unlock;
5432
5433	loc_mitm = (conn->auth_type & 0x01);
5434	rem_mitm = (conn->remote_auth & 0x01);
5435
5436	/* If we require MITM but the remote device can't provide that
5437	 * (it has NoInputNoOutput) then reject the confirmation
5438	 * request. We check the security level here since it doesn't
5439	 * necessarily match conn->auth_type.
5440	 */
5441	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5442	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5443		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5444		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5445			     sizeof(ev->bdaddr), &ev->bdaddr);
5446		goto unlock;
5447	}
5448
5449	/* If no side requires MITM protection, auto-accept */
5450	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5451	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5452
5453		/* If we're not the initiators, request authorization to
5454		 * proceed from user space (mgmt_user_confirm with
5455		 * confirm_hint set to 1). The exception is if neither
5456		 * side had MITM or if the local IO capability is
5457		 * NoInputNoOutput, in which case we auto-accept.
5458		 */
5459		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5460		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5461		    (loc_mitm || rem_mitm)) {
5462			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5463			confirm_hint = 1;
5464			goto confirm;
5465		}
5466
5467		/* If a link key already exists in the local host, leave the
5468		 * decision to user space since the remote device could be
5469		 * legitimate or malicious.
5470		 */
5471		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5472			bt_dev_dbg(hdev, "Local host already has link key");
5473			confirm_hint = 1;
5474			goto confirm;
5475		}
5476
5477		BT_DBG("Auto-accept of user confirmation with %ums delay",
5478		       hdev->auto_accept_delay);
5479
5480		if (hdev->auto_accept_delay > 0) {
5481			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5482			queue_delayed_work(conn->hdev->workqueue,
5483					   &conn->auto_accept_work, delay);
5484			goto unlock;
5485		}
5486
5487		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5488			     sizeof(ev->bdaddr), &ev->bdaddr);
5489		goto unlock;
5490	}
5491
5492confirm:
5493	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5494				  le32_to_cpu(ev->passkey), confirm_hint);
5495
5496unlock:
5497	hci_dev_unlock(hdev);
5498}
5499
5500static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5501					 struct sk_buff *skb)
5502{
5503	struct hci_ev_user_passkey_req *ev = data;
5504
5505	bt_dev_dbg(hdev, "");
5506
5507	if (hci_dev_test_flag(hdev, HCI_MGMT))
5508		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5509}
5510
5511static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5512					struct sk_buff *skb)
5513{
5514	struct hci_ev_user_passkey_notify *ev = data;
5515	struct hci_conn *conn;
5516
5517	bt_dev_dbg(hdev, "");
5518
5519	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5520	if (!conn)
5521		return;
5522
5523	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5524	conn->passkey_entered = 0;
5525
5526	if (hci_dev_test_flag(hdev, HCI_MGMT))
5527		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5528					 conn->dst_type, conn->passkey_notify,
5529					 conn->passkey_entered);
5530}
5531
5532static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5533				    struct sk_buff *skb)
5534{
5535	struct hci_ev_keypress_notify *ev = data;
5536	struct hci_conn *conn;
5537
5538	bt_dev_dbg(hdev, "");
5539
5540	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5541	if (!conn)
5542		return;
5543
5544	switch (ev->type) {
5545	case HCI_KEYPRESS_STARTED:
5546		conn->passkey_entered = 0;
5547		return;
5548
5549	case HCI_KEYPRESS_ENTERED:
5550		conn->passkey_entered++;
5551		break;
5552
5553	case HCI_KEYPRESS_ERASED:
5554		conn->passkey_entered--;
5555		break;
5556
5557	case HCI_KEYPRESS_CLEARED:
5558		conn->passkey_entered = 0;
5559		break;
5560
5561	case HCI_KEYPRESS_COMPLETED:
5562		return;
5563	}
5564
5565	if (hci_dev_test_flag(hdev, HCI_MGMT))
5566		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5567					 conn->dst_type, conn->passkey_notify,
5568					 conn->passkey_entered);
5569}
5570
5571static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5572					 struct sk_buff *skb)
5573{
5574	struct hci_ev_simple_pair_complete *ev = data;
5575	struct hci_conn *conn;
5576
5577	bt_dev_dbg(hdev, "");
5578
5579	hci_dev_lock(hdev);
5580
5581	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5582	if (!conn || !hci_conn_ssp_enabled(conn))
5583		goto unlock;
5584
5585	/* Reset the authentication requirement to unknown */
5586	conn->remote_auth = 0xff;
5587
5588	/* To avoid duplicate auth_failed events to user space we check
5589	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
5590	 * initiated the authentication. A traditional auth_complete
5591	 * event is always produced as initiator and is also mapped to
5592	 * the mgmt_auth_failed event */
5593	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5594		mgmt_auth_failed(conn, ev->status);
5595
5596	hci_conn_drop(conn);
5597
5598unlock:
5599	hci_dev_unlock(hdev);
5600}
5601
5602static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5603					 struct sk_buff *skb)
5604{
5605	struct hci_ev_remote_host_features *ev = data;
5606	struct inquiry_entry *ie;
5607	struct hci_conn *conn;
5608
5609	bt_dev_dbg(hdev, "");
5610
5611	hci_dev_lock(hdev);
5612
5613	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5614	if (conn)
5615		memcpy(conn->features[1], ev->features, 8);
5616
5617	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5618	if (ie)
5619		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5620
5621	hci_dev_unlock(hdev);
5622}
5623
5624static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5625					    struct sk_buff *skb)
5626{
5627	struct hci_ev_remote_oob_data_request *ev = edata;
5628	struct oob_data *data;
5629
5630	bt_dev_dbg(hdev, "");
5631
5632	hci_dev_lock(hdev);
5633
5634	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5635		goto unlock;
5636
5637	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5638	if (!data) {
5639		struct hci_cp_remote_oob_data_neg_reply cp;
5640
5641		bacpy(&cp.bdaddr, &ev->bdaddr);
5642		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5643			     sizeof(cp), &cp);
5644		goto unlock;
5645	}
5646
5647	if (bredr_sc_enabled(hdev)) {
5648		struct hci_cp_remote_oob_ext_data_reply cp;
5649
5650		bacpy(&cp.bdaddr, &ev->bdaddr);
5651		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5652			memset(cp.hash192, 0, sizeof(cp.hash192));
5653			memset(cp.rand192, 0, sizeof(cp.rand192));
5654		} else {
5655			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5656			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5657		}
5658		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5659		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5660
5661		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5662			     sizeof(cp), &cp);
5663	} else {
5664		struct hci_cp_remote_oob_data_reply cp;
5665
5666		bacpy(&cp.bdaddr, &ev->bdaddr);
5667		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5668		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5669
5670		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5671			     sizeof(cp), &cp);
5672	}
5673
5674unlock:
5675	hci_dev_unlock(hdev);
5676}
5677
5678#if IS_ENABLED(CONFIG_BT_HS)
5679static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5680				  struct sk_buff *skb)
5681{
5682	struct hci_ev_channel_selected *ev = data;
5683	struct hci_conn *hcon;
5684
5685	bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5686
5687	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5688	if (!hcon)
5689		return;
5690
5691	amp_read_loc_assoc_final_data(hdev, hcon);
5692}
5693
5694static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5695				      struct sk_buff *skb)
5696{
5697	struct hci_ev_phy_link_complete *ev = data;
5698	struct hci_conn *hcon, *bredr_hcon;
5699
5700	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5701		   ev->status);
5702
5703	hci_dev_lock(hdev);
5704
5705	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5706	if (!hcon)
5707		goto unlock;
5708
5709	if (!hcon->amp_mgr)
5710		goto unlock;
5711
5712	if (ev->status) {
5713		hci_conn_del(hcon);
5714		goto unlock;
5715	}
5716
5717	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5718
5719	hcon->state = BT_CONNECTED;
5720	bacpy(&hcon->dst, &bredr_hcon->dst);
5721
5722	hci_conn_hold(hcon);
5723	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5724	hci_conn_drop(hcon);
5725
5726	hci_debugfs_create_conn(hcon);
5727	hci_conn_add_sysfs(hcon);
5728
5729	amp_physical_cfm(bredr_hcon, hcon);
5730
5731unlock:
5732	hci_dev_unlock(hdev);
5733}
5734
5735static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5736				     struct sk_buff *skb)
5737{
5738	struct hci_ev_logical_link_complete *ev = data;
5739	struct hci_conn *hcon;
5740	struct hci_chan *hchan;
5741	struct amp_mgr *mgr;
5742
5743	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5744		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5745
5746	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5747	if (!hcon)
5748		return;
5749
5750	/* Create AMP hchan */
5751	hchan = hci_chan_create(hcon);
5752	if (!hchan)
5753		return;
5754
5755	hchan->handle = le16_to_cpu(ev->handle);
5756	hchan->amp = true;
5757
5758	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5759
5760	mgr = hcon->amp_mgr;
5761	if (mgr && mgr->bredr_chan) {
5762		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5763
5764		l2cap_chan_lock(bredr_chan);
5765
5766		bredr_chan->conn->mtu = hdev->block_mtu;
5767		l2cap_logical_cfm(bredr_chan, hchan, 0);
5768		hci_conn_hold(hcon);
5769
5770		l2cap_chan_unlock(bredr_chan);
5771	}
5772}
5773
5774static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5775					     struct sk_buff *skb)
5776{
5777	struct hci_ev_disconn_logical_link_complete *ev = data;
5778	struct hci_chan *hchan;
5779
5780	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5781		   le16_to_cpu(ev->handle), ev->status);
5782
5783	if (ev->status)
5784		return;
5785
5786	hci_dev_lock(hdev);
5787
5788	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5789	if (!hchan || !hchan->amp)
5790		goto unlock;
5791
5792	amp_destroy_logical_link(hchan, ev->reason);
5793
5794unlock:
5795	hci_dev_unlock(hdev);
5796}
5797
5798static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5799					     struct sk_buff *skb)
5800{
5801	struct hci_ev_disconn_phy_link_complete *ev = data;
5802	struct hci_conn *hcon;
5803
5804	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5805
5806	if (ev->status)
5807		return;
5808
5809	hci_dev_lock(hdev);
5810
5811	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5812	if (hcon && hcon->type == AMP_LINK) {
5813		hcon->state = BT_CLOSED;
5814		hci_disconn_cfm(hcon, ev->reason);
5815		hci_conn_del(hcon);
5816	}
5817
5818	hci_dev_unlock(hdev);
5819}
5820#endif
5821
5822static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5823				u8 bdaddr_type, bdaddr_t *local_rpa)
5824{
5825	if (conn->out) {
5826		conn->dst_type = bdaddr_type;
5827		conn->resp_addr_type = bdaddr_type;
5828		bacpy(&conn->resp_addr, bdaddr);
5829
5830		/* If the controller has set a Local RPA then it must be
5831		 * used instead of hdev->rpa.
5832		 */
5833		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5834			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5835			bacpy(&conn->init_addr, local_rpa);
5836		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5837			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5838			bacpy(&conn->init_addr, &conn->hdev->rpa);
5839		} else {
5840			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5841						  &conn->init_addr_type);
5842		}
5843	} else {
5844		conn->resp_addr_type = conn->hdev->adv_addr_type;
5845		/* If the controller has set a Local RPA then it must be
5846		 * used instead of hdev->rpa.
5847		 */
5848		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5849			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5850			bacpy(&conn->resp_addr, local_rpa);
5851		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5852			/* In case of ext adv, resp_addr will be updated in
5853			 * Adv Terminated event.
5854			 */
5855			if (!ext_adv_capable(conn->hdev))
5856				bacpy(&conn->resp_addr,
5857				      &conn->hdev->random_addr);
5858		} else {
5859			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5860		}
5861
5862		conn->init_addr_type = bdaddr_type;
5863		bacpy(&conn->init_addr, bdaddr);
5864
5865		/* For incoming connections, set the default minimum
5866		 * and maximum connection interval. They will be used
5867		 * to check if the parameters are in range and if not
5868		 * trigger the connection update procedure.
5869		 */
5870		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5871		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5872	}
5873}
5874
5875static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5876				 bdaddr_t *bdaddr, u8 bdaddr_type,
5877				 bdaddr_t *local_rpa, u8 role, u16 handle,
5878				 u16 interval, u16 latency,
5879				 u16 supervision_timeout)
5880{
5881	struct hci_conn_params *params;
5882	struct hci_conn *conn;
5883	struct smp_irk *irk;
5884	u8 addr_type;
5885
5886	hci_dev_lock(hdev);
5887
5888	/* All controllers implicitly stop advertising in the event of a
5889	 * connection, so ensure that the state bit is cleared.
5890	 */
5891	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5892
5893	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5894	if (!conn) {
5895		/* In case of error status and there is no connection pending
5896		 * just unlock as there is nothing to cleanup.
5897		 */
5898		if (status)
5899			goto unlock;
5900
5901		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5902		if (!conn) {
5903			bt_dev_err(hdev, "no memory for new connection");
5904			goto unlock;
5905		}
5906
5907		conn->dst_type = bdaddr_type;
5908
5909		/* If we didn't have a hci_conn object previously
5910	 * but we're in central role, this must be something
5911		 * initiated using an accept list. Since accept list based
5912		 * connections are not "first class citizens" we don't
5913		 * have full tracking of them. Therefore, we go ahead
5914		 * with a "best effort" approach of determining the
5915		 * initiator address based on the HCI_PRIVACY flag.
5916		 */
5917		if (conn->out) {
5918			conn->resp_addr_type = bdaddr_type;
5919			bacpy(&conn->resp_addr, bdaddr);
5920			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5921				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5922				bacpy(&conn->init_addr, &hdev->rpa);
5923			} else {
5924				hci_copy_identity_address(hdev,
5925							  &conn->init_addr,
5926							  &conn->init_addr_type);
5927			}
5928		}
5929	} else {
5930		cancel_delayed_work(&conn->le_conn_timeout);
5931	}
5932
5933	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5934	 * Processing it more than once per connection can corrupt kernel memory.
5935	 *
5936	 * As the connection handle is set here for the first time, it indicates
5937	 * whether the connection is already set up.
5938	 */
5939	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5940		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5941		goto unlock;
5942	}
5943
5944	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5945
5946	/* Lookup the identity address from the stored connection
5947	 * address and address type.
5948	 *
5949	 * When establishing connections to an identity address, the
5950	 * connection procedure will store the resolvable random
5951	 * address first. Now if it can be converted back into the
5952	 * identity address, start using the identity address from
5953	 * now on.
5954	 */
5955	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5956	if (irk) {
5957		bacpy(&conn->dst, &irk->bdaddr);
5958		conn->dst_type = irk->addr_type;
5959	}
5960
5961	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5962
5963	/* All connection failure handling is taken care of by the
5964	 * hci_conn_failed function which is triggered by the HCI
5965	 * request completion callbacks used for connecting.
5966	 */
5967	if (status || hci_conn_set_handle(conn, handle))
5968		goto unlock;
5969
5970	/* Drop the connection if it has been aborted */
5971	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5972		hci_conn_drop(conn);
5973		goto unlock;
5974	}
5975
5976	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5977		addr_type = BDADDR_LE_PUBLIC;
5978	else
5979		addr_type = BDADDR_LE_RANDOM;
5980
5981	/* Drop the connection if the device is blocked */
5982	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5983		hci_conn_drop(conn);
5984		goto unlock;
5985	}
5986
5987	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5988		mgmt_device_connected(hdev, conn, NULL, 0);
5989
5990	conn->sec_level = BT_SECURITY_LOW;
5991	conn->state = BT_CONFIG;
5992
5993	/* Store current advertising instance as connection advertising instance
5994	 * when software rotation is in use so it can be re-enabled when
5995	 * disconnected.
5996	 */
5997	if (!ext_adv_capable(hdev))
5998		conn->adv_instance = hdev->cur_adv_instance;
5999
6000	conn->le_conn_interval = interval;
6001	conn->le_conn_latency = latency;
6002	conn->le_supv_timeout = supervision_timeout;
6003
6004	hci_debugfs_create_conn(conn);
6005	hci_conn_add_sysfs(conn);
6006
6007	/* The remote features procedure is defined for the central
6008	 * role only, so the remote features are only requested for
6009	 * connections that we initiated.
6010	 *
6011	 * If the local controller supports peripheral-initiated features
6012	 * exchange, then requesting the remote features in peripheral
6013	 * role is possible. Otherwise just transition into the
6014	 * connected state without requesting the remote features.
6015	 */
6016	if (conn->out ||
6017	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6018		struct hci_cp_le_read_remote_features cp;
6019
6020		cp.handle = __cpu_to_le16(conn->handle);
6021
6022		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6023			     sizeof(cp), &cp);
6024
6025		hci_conn_hold(conn);
6026	} else {
6027		conn->state = BT_CONNECTED;
6028		hci_connect_cfm(conn, status);
6029	}
6030
6031	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6032					   conn->dst_type);
6033	if (params) {
6034		hci_pend_le_list_del_init(params);
6035		if (params->conn) {
6036			hci_conn_drop(params->conn);
6037			hci_conn_put(params->conn);
6038			params->conn = NULL;
6039		}
6040	}
6041
6042unlock:
6043	hci_update_passive_scan(hdev);
6044	hci_dev_unlock(hdev);
6045}
6046
6047static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6048				     struct sk_buff *skb)
6049{
6050	struct hci_ev_le_conn_complete *ev = data;
6051
6052	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6053
6054	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6055			     NULL, ev->role, le16_to_cpu(ev->handle),
6056			     le16_to_cpu(ev->interval),
6057			     le16_to_cpu(ev->latency),
6058			     le16_to_cpu(ev->supervision_timeout));
6059}
6060
6061static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6062					 struct sk_buff *skb)
6063{
6064	struct hci_ev_le_enh_conn_complete *ev = data;
6065
6066	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6067
6068	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6069			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6070			     le16_to_cpu(ev->interval),
6071			     le16_to_cpu(ev->latency),
6072			     le16_to_cpu(ev->supervision_timeout));
6073}
6074
6075static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6076				    struct sk_buff *skb)
6077{
6078	struct hci_evt_le_ext_adv_set_term *ev = data;
6079	struct hci_conn *conn;
6080	struct adv_info *adv, *n;
6081
6082	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6083
6084	/* The Bluetooth Core 5.3 specification clearly states that this event
6085	 * shall not be sent when the Host disables the advertising set. So in
6086	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6087	 *
6088	 * When the Host disables an advertising set, all cleanup is done via
6089	 * its command callback and does not need to be duplicated here.
6090	 */
6091	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6092		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6093		return;
6094	}
6095
6096	hci_dev_lock(hdev);
6097
6098	adv = hci_find_adv_instance(hdev, ev->handle);
6099
6100	if (ev->status) {
6101		if (!adv)
6102			goto unlock;
6103
6104		/* Remove advertising as it has been terminated */
6105		hci_remove_adv_instance(hdev, ev->handle);
6106		mgmt_advertising_removed(NULL, hdev, ev->handle);
6107
6108		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6109			if (adv->enabled)
6110				goto unlock;
6111		}
6112
6113		/* We are no longer advertising, clear HCI_LE_ADV */
6114		hci_dev_clear_flag(hdev, HCI_LE_ADV);
6115		goto unlock;
6116	}
6117
6118	if (adv)
6119		adv->enabled = false;
6120
6121	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6122	if (conn) {
6123		/* Store handle in the connection so the correct advertising
6124		 * instance can be re-enabled when disconnected.
6125		 */
6126		conn->adv_instance = ev->handle;
6127
6128		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6129		    bacmp(&conn->resp_addr, BDADDR_ANY))
6130			goto unlock;
6131
6132		if (!ev->handle) {
6133			bacpy(&conn->resp_addr, &hdev->random_addr);
6134			goto unlock;
6135		}
6136
6137		if (adv)
6138			bacpy(&conn->resp_addr, &adv->random_addr);
6139	}
6140
6141unlock:
6142	hci_dev_unlock(hdev);
6143}
6144
6145static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6146					    struct sk_buff *skb)
6147{
6148	struct hci_ev_le_conn_update_complete *ev = data;
6149	struct hci_conn *conn;
6150
6151	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6152
6153	if (ev->status)
6154		return;
6155
6156	hci_dev_lock(hdev);
6157
6158	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6159	if (conn) {
6160		conn->le_conn_interval = le16_to_cpu(ev->interval);
6161		conn->le_conn_latency = le16_to_cpu(ev->latency);
6162		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6163	}
6164
6165	hci_dev_unlock(hdev);
6166}
6167
6168/* This function requires the caller holds hdev->lock */
6169static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6170					      bdaddr_t *addr,
6171					      u8 addr_type, bool addr_resolved,
6172					      u8 adv_type)
6173{
6174	struct hci_conn *conn;
6175	struct hci_conn_params *params;
6176
6177	/* If the event is not connectable don't proceed further */
6178	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6179		return NULL;
6180
6181	/* Ignore if the device is blocked or hdev is suspended */
6182	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6183	    hdev->suspended)
6184		return NULL;
6185
6186	/* Most controllers will fail if we try to create new connections
6187	 * while we have an existing one in peripheral role.
6188	 */
6189	if (hdev->conn_hash.le_num_peripheral > 0 &&
6190	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6191	     !(hdev->le_states[3] & 0x10)))
6192		return NULL;
6193
6194	/* If we're not connectable only connect devices that we have in
6195	 * our pend_le_conns list.
6196	 */
6197	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6198					   addr_type);
6199	if (!params)
6200		return NULL;
6201
6202	if (!params->explicit_connect) {
6203		switch (params->auto_connect) {
6204		case HCI_AUTO_CONN_DIRECT:
6205			/* Only devices advertising with ADV_DIRECT_IND
6206			 * trigger a connection attempt. This allows
6207			 * incoming connections from peripheral devices.
6208			 */
6209			if (adv_type != LE_ADV_DIRECT_IND)
6210				return NULL;
6211			break;
6212		case HCI_AUTO_CONN_ALWAYS:
6213			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
6214			 * trigger a connection attempt. This means that
6215			 * incoming connections from peripheral devices are
6216			 * accepted and also outgoing connections to peripheral
6217			 * devices are established when found.
6218			 */
6219			break;
6220		default:
6221			return NULL;
6222		}
6223	}
6224
6225	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6226			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6227			      HCI_ROLE_MASTER);
6228	if (!IS_ERR(conn)) {
6229		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6230		 * by higher layer that tried to connect, if no then
6231		 * store the pointer since we don't really have any
6232		 * other owner of the object besides the params that
6233		 * triggered it. This way we can abort the connection if
6234		 * the parameters get removed and keep the reference
6235		 * count consistent once the connection is established.
6236		 */
6237
6238		if (!params->explicit_connect)
6239			params->conn = hci_conn_get(conn);
6240
6241		return conn;
6242	}
6243
6244	switch (PTR_ERR(conn)) {
6245	case -EBUSY:
6246		/* If hci_connect() returns -EBUSY it means there is already
6247		 * an LE connection attempt going on. Since controllers don't
6248		 * support more than one connection attempt at a time, we
6249		 * don't consider this an error case.
6250		 */
6251		break;
6252	default:
6253		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6254		return NULL;
6255	}
6256
6257	return NULL;
6258}
6259
6260static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6261			       u8 bdaddr_type, bdaddr_t *direct_addr,
6262			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6263			       bool ext_adv, bool ctl_time, u64 instant)
6264{
6265	struct discovery_state *d = &hdev->discovery;
6266	struct smp_irk *irk;
6267	struct hci_conn *conn;
6268	bool match, bdaddr_resolved;
6269	u32 flags;
6270	u8 *ptr;
6271
6272	switch (type) {
6273	case LE_ADV_IND:
6274	case LE_ADV_DIRECT_IND:
6275	case LE_ADV_SCAN_IND:
6276	case LE_ADV_NONCONN_IND:
6277	case LE_ADV_SCAN_RSP:
6278		break;
6279	default:
6280		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6281				       "type: 0x%02x", type);
6282		return;
6283	}
6284
6285	if (len > max_adv_len(hdev)) {
6286		bt_dev_err_ratelimited(hdev,
6287				       "adv larger than maximum supported");
6288		return;
6289	}
6290
6291	/* Find the end of the data in case the report contains padded zero
6292	 * bytes at the end causing an invalid length value.
6293	 *
6294	 * When data is NULL, len is 0 so there is no need for extra ptr
6295	 * check as 'ptr < data + 0' is already false in such case.
6296	 */
6297	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6298		if (ptr + 1 + *ptr > data + len)
6299			break;
6300	}
6301
6302	/* Adjust for the actual length. This handles the case when the
6303	 * remote device is advertising with an incorrect data length.
6304	 */
6305	len = ptr - data;
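	/* Worked example (illustrative, not from a real trace): the payload
	 * is a sequence of length-prefixed AD structures <len><type><data...>.
	 * For the 9-byte report 02 01 06 03 03 0d 18 00 00 the loop above
	 * walks the Flags structure (len 2) and the 16-bit Service UUID
	 * structure (len 3), stops at the first zero length octet, and the
	 * reported length is trimmed from 9 to 7.
	 */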
6306
6307	/* If the direct address is present, then this report is from
6308	 * a LE Direct Advertising Report event. In that case it is
6309	 * important to see if the address is matching the local
6310	 * controller address.
6311	 */
6312	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6313		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6314						  &bdaddr_resolved);
6315
6316		/* Only resolvable random addresses are valid for these
6317		 * kind of reports and others can be ignored.
6318		 */
6319		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6320			return;
6321
6322		/* If the controller is not using resolvable random
6323		 * addresses, then this report can be ignored.
6324		 */
6325		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6326			return;
6327
6328		/* If the local IRK of the controller does not match
6329		 * with the resolvable random address provided, then
6330		 * this report can be ignored.
6331		 */
6332		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6333			return;
6334	}
6335
6336	/* Check if we need to convert to identity address */
6337	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6338	if (irk) {
6339		bdaddr = &irk->bdaddr;
6340		bdaddr_type = irk->addr_type;
6341	}
6342
6343	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6344
6345	/* Check if we have been requested to connect to this device.
6346	 *
6347	 * direct_addr is set only for directed advertising reports (it is NULL
6348	 * for advertising reports) and is already verified to be RPA above.
6349	 */
6350	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6351				     type);
6352	if (!ext_adv && conn && type == LE_ADV_IND &&
6353	    len <= max_adv_len(hdev)) {
6354		/* Store report for later inclusion by
6355		 * mgmt_device_connected
6356		 */
6357		memcpy(conn->le_adv_data, data, len);
6358		conn->le_adv_data_len = len;
6359	}
6360
6361	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6362		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6363	else
6364		flags = 0;
6365
6366	/* All scan results should be sent up for Mesh systems */
6367	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6368		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6369				  rssi, flags, data, len, NULL, 0, instant);
6370		return;
6371	}
6372
6373	/* Passive scanning shouldn't trigger any device found events,
6374	 * except for devices marked as CONN_REPORT for which we do send
6375	 * device found events, or when advertisement monitoring was requested.
6376	 */
6377	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6378		if (type == LE_ADV_DIRECT_IND)
6379			return;
6380
6381		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6382					       bdaddr, bdaddr_type) &&
6383		    idr_is_empty(&hdev->adv_monitors_idr))
6384			return;
6385
6386		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6387				  rssi, flags, data, len, NULL, 0, 0);
6388		return;
6389	}
6390
6391	/* When receiving a scan response, there is no way to
6392	 * know if the remote device is connectable or not. However,
6393	 * since scan responses are merged with a previously seen
6394	 * advertising report, the flags field from that report
6395	 * will be used.
6396	 *
6397	 * In the unlikely case that a controller just sends a scan
6398	 * response event that doesn't match the pending report, then
6399	 * it is marked as a standalone SCAN_RSP.
6400	 */
6401	if (type == LE_ADV_SCAN_RSP)
6402		flags = MGMT_DEV_FOUND_SCAN_RSP;
6403
6404	/* If there's nothing pending either store the data from this
6405	 * event or send an immediate device found event if the data
6406	 * should not be stored for later.
6407	 */
6408	if (!ext_adv && !has_pending_adv_report(hdev)) {
6409		/* If the report will trigger a SCAN_REQ store it for
6410		 * later merging.
6411		 */
6412		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6413			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6414						 rssi, flags, data, len);
6415			return;
6416		}
6417
6418		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6419				  rssi, flags, data, len, NULL, 0, 0);
6420		return;
6421	}
6422
6423	/* Check if the pending report is for the same device as the new one */
6424	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6425		 bdaddr_type == d->last_adv_addr_type);
6426
6427	/* If the pending data doesn't match this report or this isn't a
6428	 * scan response (e.g. we got a duplicate ADV_IND) then force
6429	 * sending of the pending data.
6430	 */
6431	if (type != LE_ADV_SCAN_RSP || !match) {
6432		/* Send out whatever is in the cache, but skip duplicates */
6433		if (!match)
6434			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6435					  d->last_adv_addr_type, NULL,
6436					  d->last_adv_rssi, d->last_adv_flags,
6437					  d->last_adv_data,
6438					  d->last_adv_data_len, NULL, 0, 0);
6439
6440		/* If the new report will trigger a SCAN_REQ store it for
6441		 * later merging.
6442		 */
6443		if (!ext_adv && (type == LE_ADV_IND ||
6444				 type == LE_ADV_SCAN_IND)) {
6445			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6446						 rssi, flags, data, len);
6447			return;
6448		}
6449
6450		/* The advertising reports cannot be merged, so clear
6451		 * the pending report and send out a device found event.
6452		 */
6453		clear_pending_adv_report(hdev);
6454		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6455				  rssi, flags, data, len, NULL, 0, 0);
6456		return;
6457	}
6458
6459	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6460	 * the new event is a SCAN_RSP. We can therefore proceed with
6461	 * sending a merged device found event.
6462	 */
6463	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6464			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6465			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6466	clear_pending_adv_report(hdev);
6467}
6468
6469static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6470				  struct sk_buff *skb)
6471{
6472	struct hci_ev_le_advertising_report *ev = data;
6473	u64 instant = jiffies;
6474
6475	if (!ev->num)
6476		return;
6477
6478	hci_dev_lock(hdev);
6479
6480	while (ev->num--) {
6481		struct hci_ev_le_advertising_info *info;
6482		s8 rssi;
6483
6484		info = hci_le_ev_skb_pull(hdev, skb,
6485					  HCI_EV_LE_ADVERTISING_REPORT,
6486					  sizeof(*info));
6487		if (!info)
6488			break;
6489
6490		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6491					info->length + 1))
6492			break;
6493
6494		if (info->length <= max_adv_len(hdev)) {
6495			rssi = info->data[info->length];
6496			process_adv_report(hdev, info->type, &info->bdaddr,
6497					   info->bdaddr_type, NULL, 0, rssi,
6498					   info->data, info->length, false,
6499					   false, instant);
6500		} else {
6501			bt_dev_err(hdev, "Dropping invalid advertising data");
6502		}
6503	}
6504
6505	hci_dev_unlock(hdev);
6506}
6507
6508static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6509{
6510	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6511		switch (evt_type) {
6512		case LE_LEGACY_ADV_IND:
6513			return LE_ADV_IND;
6514		case LE_LEGACY_ADV_DIRECT_IND:
6515			return LE_ADV_DIRECT_IND;
6516		case LE_LEGACY_ADV_SCAN_IND:
6517			return LE_ADV_SCAN_IND;
6518		case LE_LEGACY_NONCONN_IND:
6519			return LE_ADV_NONCONN_IND;
6520		case LE_LEGACY_SCAN_RSP_ADV:
6521		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6522			return LE_ADV_SCAN_RSP;
6523		}
6524
6525		goto invalid;
6526	}
6527
6528	if (evt_type & LE_EXT_ADV_CONN_IND) {
6529		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6530			return LE_ADV_DIRECT_IND;
6531
6532		return LE_ADV_IND;
6533	}
6534
6535	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6536		return LE_ADV_SCAN_RSP;
6537
6538	if (evt_type & LE_EXT_ADV_SCAN_IND)
6539		return LE_ADV_SCAN_IND;
6540
6541	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6542	    evt_type & LE_EXT_ADV_DIRECT_IND)
6543		return LE_ADV_NONCONN_IND;
6544
6545invalid:
6546	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6547			       evt_type);
6548
6549	return LE_ADV_INVALID;
6550}
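/* Illustrative mapping, assuming the evt_type flag values from hci.h
 * (LE_EXT_ADV_LEGACY_PDU 0x0010, LE_EXT_ADV_CONN_IND 0x0001): a legacy
 * ADV_IND report arrives as evt_type 0x0013 and is matched by the legacy
 * switch above, while an extended connectable PDU has only
 * LE_EXT_ADV_CONN_IND set and maps to LE_ADV_IND via the flag tests.
 */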
6551
6552static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6553				      struct sk_buff *skb)
6554{
6555	struct hci_ev_le_ext_adv_report *ev = data;
6556	u64 instant = jiffies;
6557
6558	if (!ev->num)
6559		return;
6560
6561	hci_dev_lock(hdev);
6562
6563	while (ev->num--) {
6564		struct hci_ev_le_ext_adv_info *info;
6565		u8 legacy_evt_type;
6566		u16 evt_type;
6567
6568		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6569					  sizeof(*info));
6570		if (!info)
6571			break;
6572
6573		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6574					info->length))
6575			break;
6576
6577		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6578		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6579		if (legacy_evt_type != LE_ADV_INVALID) {
6580			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6581					   info->bdaddr_type, NULL, 0,
6582					   info->rssi, info->data, info->length,
6583					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6584					   false, instant);
6585		}
6586	}
6587
6588	hci_dev_unlock(hdev);
6589}
6590
6591static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6592{
6593	struct hci_cp_le_pa_term_sync cp;
6594
6595	memset(&cp, 0, sizeof(cp));
6596	cp.handle = handle;
6597
6598	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6599}
6600
6601static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6602					    struct sk_buff *skb)
6603{
6604	struct hci_ev_le_pa_sync_established *ev = data;
6605	int mask = hdev->link_mode;
6606	__u8 flags = 0;
6607	struct hci_conn *pa_sync;
6608
6609	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6610
6611	hci_dev_lock(hdev);
6612
6613	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6614
6615	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6616	if (!(mask & HCI_LM_ACCEPT)) {
6617		hci_le_pa_term_sync(hdev, ev->handle);
6618		goto unlock;
6619	}
6620
6621	if (!(flags & HCI_PROTO_DEFER))
6622		goto unlock;
6623
6624	if (ev->status) {
6625		/* Add connection to indicate the failed PA sync event */
6626		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6627					     HCI_ROLE_SLAVE);
6628
6629		if (!pa_sync)
6630			goto unlock;
6631
6632		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6633
6634		/* Notify iso layer */
6635		hci_connect_cfm(pa_sync, ev->status);
6636	}
6637
6638unlock:
6639	hci_dev_unlock(hdev);
6640}
6641
6642static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6643				      struct sk_buff *skb)
6644{
6645	struct hci_ev_le_per_adv_report *ev = data;
6646	int mask = hdev->link_mode;
6647	__u8 flags = 0;
6648
6649	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6650
6651	hci_dev_lock(hdev);
6652
6653	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6654	if (!(mask & HCI_LM_ACCEPT))
6655		hci_le_pa_term_sync(hdev, ev->sync_handle);
6656
6657	hci_dev_unlock(hdev);
6658}
6659
6660static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6661					    struct sk_buff *skb)
6662{
6663	struct hci_ev_le_remote_feat_complete *ev = data;
6664	struct hci_conn *conn;
6665
6666	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6667
6668	hci_dev_lock(hdev);
6669
6670	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6671	if (conn) {
6672		if (!ev->status)
6673			memcpy(conn->features[0], ev->features, 8);
6674
6675		if (conn->state == BT_CONFIG) {
6676			__u8 status;
6677
6678			/* If the local controller supports peripheral-initiated
6679			 * features exchange, but the remote controller does
6680			 * not, then it is possible that the error code 0x1a
6681			 * for unsupported remote feature gets returned.
6682			 *
6683			 * In this specific case, allow the connection to
6684			 * transition into connected state and mark it as
6685			 * successful.
6686			 */
6687			if (!conn->out && ev->status == 0x1a &&
6688			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6689				status = 0x00;
6690			else
6691				status = ev->status;
6692
6693			conn->state = BT_CONNECTED;
6694			hci_connect_cfm(conn, status);
6695			hci_conn_drop(conn);
6696		}
6697	}
6698
6699	hci_dev_unlock(hdev);
6700}
6701
6702static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6703				   struct sk_buff *skb)
6704{
6705	struct hci_ev_le_ltk_req *ev = data;
6706	struct hci_cp_le_ltk_reply cp;
6707	struct hci_cp_le_ltk_neg_reply neg;
6708	struct hci_conn *conn;
6709	struct smp_ltk *ltk;
6710
6711	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6712
6713	hci_dev_lock(hdev);
6714
6715	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6716	if (conn == NULL)
6717		goto not_found;
6718
6719	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6720	if (!ltk)
6721		goto not_found;
6722
6723	if (smp_ltk_is_sc(ltk)) {
6724		/* With SC both EDiv and Rand are set to zero */
6725		if (ev->ediv || ev->rand)
6726			goto not_found;
6727	} else {
6728		/* For non-SC keys check that EDiv and Rand match */
6729		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6730			goto not_found;
6731	}
6732
6733	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6734	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
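	/* For orientation: cp.ltk is a fixed 16-octet field, so e.g. a key
	 * with enc_size = 7 copies 7 octets and zero-pads the remaining
	 * 16 - 7 = 9 octets.
	 */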
6735	cp.handle = cpu_to_le16(conn->handle);
6736
6737	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6738
6739	conn->enc_key_size = ltk->enc_size;
6740
6741	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6742
6743	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6744	 * temporary key used to encrypt a connection following
6745	 * pairing. It is used during the Encrypted Session Setup to
6746	 * distribute the keys. Later, security can be re-established
6747	 * using a distributed LTK.
6748	 */
6749	if (ltk->type == SMP_STK) {
6750		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6751		list_del_rcu(&ltk->list);
6752		kfree_rcu(ltk, rcu);
6753	} else {
6754		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6755	}
6756
6757	hci_dev_unlock(hdev);
6758
6759	return;
6760
6761not_found:
6762	neg.handle = ev->handle;
6763	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6764	hci_dev_unlock(hdev);
6765}
6766
6767static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6768				      u8 reason)
6769{
6770	struct hci_cp_le_conn_param_req_neg_reply cp;
6771
6772	cp.handle = cpu_to_le16(handle);
6773	cp.reason = reason;
6774
6775	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6776		     &cp);
6777}
6778
6779static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6780					     struct sk_buff *skb)
6781{
6782	struct hci_ev_le_remote_conn_param_req *ev = data;
6783	struct hci_cp_le_conn_param_req_reply cp;
6784	struct hci_conn *hcon;
6785	u16 handle, min, max, latency, timeout;
6786
6787	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6788
6789	handle = le16_to_cpu(ev->handle);
6790	min = le16_to_cpu(ev->interval_min);
6791	max = le16_to_cpu(ev->interval_max);
6792	latency = le16_to_cpu(ev->latency);
6793	timeout = le16_to_cpu(ev->timeout);
6794
6795	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6796	if (!hcon || hcon->state != BT_CONNECTED)
6797		return send_conn_param_neg_reply(hdev, handle,
6798						 HCI_ERROR_UNKNOWN_CONN_ID);
6799
6800	if (max > hcon->le_conn_max_interval)
6801		return send_conn_param_neg_reply(hdev, handle,
6802						 HCI_ERROR_INVALID_LL_PARAMS);
6803
6804	if (hci_check_conn_params(min, max, latency, timeout))
6805		return send_conn_param_neg_reply(hdev, handle,
6806						 HCI_ERROR_INVALID_LL_PARAMS);
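	/* Units sketch (per the Core spec): min/max are in 1.25 ms slots and
	 * timeout in 10 ms slots, so e.g. max = 40 with timeout = 500 asks
	 * for a 50 ms interval with a 5 s supervision timeout. Requests
	 * where the timeout cannot cover (1 + latency) * 2 intervals are
	 * rejected by hci_check_conn_params() above.
	 */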
6807
6808	if (hcon->role == HCI_ROLE_MASTER) {
6809		struct hci_conn_params *params;
6810		u8 store_hint;
6811
6812		hci_dev_lock(hdev);
6813
6814		params = hci_conn_params_lookup(hdev, &hcon->dst,
6815						hcon->dst_type);
6816		if (params) {
6817			params->conn_min_interval = min;
6818			params->conn_max_interval = max;
6819			params->conn_latency = latency;
6820			params->supervision_timeout = timeout;
6821			store_hint = 0x01;
6822		} else {
6823			store_hint = 0x00;
6824		}
6825
6826		hci_dev_unlock(hdev);
6827
6828		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6829				    store_hint, min, max, latency, timeout);
6830	}
6831
6832	cp.handle = ev->handle;
6833	cp.interval_min = ev->interval_min;
6834	cp.interval_max = ev->interval_max;
6835	cp.latency = ev->latency;
6836	cp.timeout = ev->timeout;
6837	cp.min_ce_len = 0;
6838	cp.max_ce_len = 0;
6839
6840	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6841}
6842
6843static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6844					 struct sk_buff *skb)
6845{
6846	struct hci_ev_le_direct_adv_report *ev = data;
6847	u64 instant = jiffies;
6848	int i;
6849
6850	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6851				flex_array_size(ev, info, ev->num)))
6852		return;
6853
6854	if (!ev->num)
6855		return;
6856
6857	hci_dev_lock(hdev);
6858
6859	for (i = 0; i < ev->num; i++) {
6860		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6861
6862		process_adv_report(hdev, info->type, &info->bdaddr,
6863				   info->bdaddr_type, &info->direct_addr,
6864				   info->direct_addr_type, info->rssi, NULL, 0,
6865				   false, false, instant);
6866	}
6867
6868	hci_dev_unlock(hdev);
6869}
6870
6871static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6872				  struct sk_buff *skb)
6873{
6874	struct hci_ev_le_phy_update_complete *ev = data;
6875	struct hci_conn *conn;
6876
6877	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6878
6879	if (ev->status)
6880		return;
6881
6882	hci_dev_lock(hdev);
6883
6884	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6885	if (!conn)
6886		goto unlock;
6887
6888	conn->le_tx_phy = ev->tx_phy;
6889	conn->le_rx_phy = ev->rx_phy;
6890
6891unlock:
6892	hci_dev_unlock(hdev);
6893}
6894
6895static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6896					struct sk_buff *skb)
6897{
6898	struct hci_evt_le_cis_established *ev = data;
6899	struct hci_conn *conn;
6900	struct bt_iso_qos *qos;
6901	bool pending = false;
6902	u16 handle = __le16_to_cpu(ev->handle);
6903
6904	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6905
6906	hci_dev_lock(hdev);
6907
6908	conn = hci_conn_hash_lookup_handle(hdev, handle);
6909	if (!conn) {
6910		bt_dev_err(hdev,
6911			   "Unable to find connection with handle 0x%4.4x",
6912			   handle);
6913		goto unlock;
6914	}
6915
6916	if (conn->type != ISO_LINK) {
6917		bt_dev_err(hdev,
6918			   "Invalid connection link type handle 0x%4.4x",
6919			   handle);
6920		goto unlock;
6921	}
6922
6923	qos = &conn->iso_qos;
6924
6925	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6926
6927	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6928	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6929	qos->ucast.out.interval = qos->ucast.in.interval;
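	/* e.g. an ISO interval of 8 slots (8 * 1.25 ms = 10 ms) becomes an
	 * SDU interval of 8 * 1250 = 10000 us.
	 */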
6930
6931	switch (conn->role) {
6932	case HCI_ROLE_SLAVE:
6933		/* Convert Transport Latency (us) to Latency (msec) */
6934		qos->ucast.in.latency =
6935			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6936					  1000);
6937		qos->ucast.out.latency =
6938			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6939					  1000);
6940		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6941		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6942		qos->ucast.in.phy = ev->c_phy;
6943		qos->ucast.out.phy = ev->p_phy;
6944		break;
6945	case HCI_ROLE_MASTER:
6946		/* Convert Transport Latency (us) to Latency (msec) */
6947		qos->ucast.out.latency =
6948			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6949					  1000);
6950		qos->ucast.in.latency =
6951			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6952					  1000);
6953		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6954		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6955		qos->ucast.out.phy = ev->c_phy;
6956		qos->ucast.in.phy = ev->p_phy;
6957		break;
6958	}
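	/* For orientation: c_* fields describe the Central-to-Peripheral
	 * direction and p_* the reverse, hence the in/out swap between the
	 * two roles above; e.g. a transport latency of 10000 us rounds to
	 * DIV_ROUND_CLOSEST(10000, 1000) = 10 ms.
	 */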
6959
6960	if (!ev->status) {
6961		conn->state = BT_CONNECTED;
6962		hci_debugfs_create_conn(conn);
6963		hci_conn_add_sysfs(conn);
6964		hci_iso_setup_path(conn);
6965		goto unlock;
6966	}
6967
6968	conn->state = BT_CLOSED;
6969	hci_connect_cfm(conn, ev->status);
6970	hci_conn_del(conn);
6971
6972unlock:
6973	if (pending)
6974		hci_le_create_cis_pending(hdev);
6975
6976	hci_dev_unlock(hdev);
6977}
6978
6979static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6980{
6981	struct hci_cp_le_reject_cis cp;
6982
6983	memset(&cp, 0, sizeof(cp));
6984	cp.handle = handle;
6985	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6986	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6987}
6988
6989static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6990{
6991	struct hci_cp_le_accept_cis cp;
6992
6993	memset(&cp, 0, sizeof(cp));
6994	cp.handle = handle;
6995	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6996}
6997
6998static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6999			       struct sk_buff *skb)
7000{
7001	struct hci_evt_le_cis_req *ev = data;
7002	u16 acl_handle, cis_handle;
7003	struct hci_conn *acl, *cis;
7004	int mask;
7005	__u8 flags = 0;
7006
7007	acl_handle = __le16_to_cpu(ev->acl_handle);
7008	cis_handle = __le16_to_cpu(ev->cis_handle);
7009
7010	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
7011		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
7012
7013	hci_dev_lock(hdev);
7014
7015	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
7016	if (!acl)
7017		goto unlock;
7018
7019	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
7020	if (!(mask & HCI_LM_ACCEPT)) {
7021		hci_le_reject_cis(hdev, ev->cis_handle);
7022		goto unlock;
7023	}
7024
7025	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
7026	if (!cis) {
7027		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
7028				   cis_handle);
7029		if (!cis) {
7030			hci_le_reject_cis(hdev, ev->cis_handle);
7031			goto unlock;
7032		}
7033	}
7034
7035	cis->iso_qos.ucast.cig = ev->cig_id;
7036	cis->iso_qos.ucast.cis = ev->cis_id;
7037
7038	if (!(flags & HCI_PROTO_DEFER)) {
7039		hci_le_accept_cis(hdev, ev->cis_handle);
7040	} else {
7041		cis->state = BT_CONNECT2;
7042		hci_connect_cfm(cis, 0);
7043	}
7044
7045unlock:
7046	hci_dev_unlock(hdev);
7047}
7048
7049static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7050{
7051	u8 handle = PTR_UINT(data);
7052
7053	return hci_le_terminate_big_sync(hdev, handle,
7054					 HCI_ERROR_LOCAL_HOST_TERM);
7055}
7056
7057static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7058					   struct sk_buff *skb)
7059{
7060	struct hci_evt_le_create_big_complete *ev = data;
7061	struct hci_conn *conn;
7062	__u8 i = 0;
7063
7064	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7065
7066	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7067				flex_array_size(ev, bis_handle, ev->num_bis)))
7068		return;
7069
7070	hci_dev_lock(hdev);
7071	rcu_read_lock();
7072
7073	/* Connect all BISes that are bound to the BIG */
7074	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
7075		if (bacmp(&conn->dst, BDADDR_ANY) ||
7076		    conn->type != ISO_LINK ||
7077		    conn->iso_qos.bcast.big != ev->handle)
7078			continue;
7079
7080		if (hci_conn_set_handle(conn,
7081					__le16_to_cpu(ev->bis_handle[i++])))
7082			continue;
7083
7084		if (!ev->status) {
7085			conn->state = BT_CONNECTED;
7086			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
7087			rcu_read_unlock();
7088			hci_debugfs_create_conn(conn);
7089			hci_conn_add_sysfs(conn);
7090			hci_iso_setup_path(conn);
7091			rcu_read_lock();
7092			continue;
7093		}
7094
7095		hci_connect_cfm(conn, ev->status);
7096		rcu_read_unlock();
7097		hci_conn_del(conn);
7098		rcu_read_lock();
7099	}
7100
7101	rcu_read_unlock();
7102
7103	if (!ev->status && !i)
7104		/* If no BISes have been connected for the BIG,
7105		 * terminate. This is in case all bound connections
7106		 * have been closed before the BIG creation
7107		 * has completed.
7108		 */
7109		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
7110				   UINT_PTR(ev->handle), NULL);
7111
7112	hci_dev_unlock(hdev);
7113}
7114
7115static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7116					    struct sk_buff *skb)
7117{
7118	struct hci_evt_le_big_sync_estabilished *ev = data;
7119	struct hci_conn *bis;
7120	int i;
7121
7122	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7123
7124	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7125				flex_array_size(ev, bis, ev->num_bis)))
7126		return;
7127
7128	hci_dev_lock(hdev);
7129
7130	for (i = 0; i < ev->num_bis; i++) {
7131		u16 handle = le16_to_cpu(ev->bis[i]);
7132		__le32 interval;
7133
7134		bis = hci_conn_hash_lookup_handle(hdev, handle);
7135		if (!bis) {
7136			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7137					   HCI_ROLE_SLAVE, handle);
7138			if (!bis)
7139				continue;
7140		}
7141
7142		if (ev->status != 0x42)
7143			/* Mark PA sync as established */
7144			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7145
7146		bis->iso_qos.bcast.big = ev->handle;
7147		memset(&interval, 0, sizeof(interval));
7148		memcpy(&interval, ev->latency, sizeof(ev->latency));
7149		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7150		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7151		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
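		/* e.g. an ISO interval of 24 slots is 24 * 1.25 ms = 30 ms,
		 * computed as 24 * 125 / 100 to stay in integer math.
		 */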
7152		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7153
7154		if (!ev->status) {
7155			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7156			hci_iso_setup_path(bis);
7157		}
7158	}
7159
7160	/* In case BIG sync failed, notify each failed connection to
7161	 * the user after all hci connections have been added.
7162	 */
7163	if (ev->status)
7164		for (i = 0; i < ev->num_bis; i++) {
7165			u16 handle = le16_to_cpu(ev->bis[i]);
7166
7167			bis = hci_conn_hash_lookup_handle(hdev, handle);
7168
			/* The lookup may fail if hci_conn_add() failed in the
			 * loop above, so skip entries without a connection.
			 */
			if (!bis)
				continue;

7169			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7170			hci_connect_cfm(bis, ev->status);
7171		}
7172
7173	hci_dev_unlock(hdev);
7174}
7175
7176static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7177					   struct sk_buff *skb)
7178{
7179	struct hci_evt_le_big_info_adv_report *ev = data;
7180	int mask = hdev->link_mode;
7181	__u8 flags = 0;
7182	struct hci_conn *pa_sync;
7183
7184	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7185
7186	hci_dev_lock(hdev);
7187
7188	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7189	if (!(mask & HCI_LM_ACCEPT)) {
7190		hci_le_pa_term_sync(hdev, ev->sync_handle);
7191		goto unlock;
7192	}
7193
7194	if (!(flags & HCI_PROTO_DEFER))
7195		goto unlock;
7196
7197	pa_sync =
7198		hci_conn_hash_lookup_pa_sync_handle(hdev,
7199				le16_to_cpu(ev->sync_handle));
7200
7201	if (pa_sync)
7202		goto unlock;
7203
7204	/* Add connection to indicate the PA sync event */
7205	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
7206				     HCI_ROLE_SLAVE);
7207
7208	if (!pa_sync)
7209		goto unlock;
7210
7211	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
7212	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
7213
7214	/* Notify iso layer */
7215	hci_connect_cfm(pa_sync, 0x00);
7216
7217unlock:
7218	hci_dev_unlock(hdev);
7219}
7220
7221#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7222[_op] = { \
7223	.func = _func, \
7224	.min_len = _min_len, \
7225	.max_len = _max_len, \
7226}
7227
7228#define HCI_LE_EV(_op, _func, _len) \
7229	HCI_LE_EV_VL(_op, _func, _len, _len)
7230
7231#define HCI_LE_EV_STATUS(_op, _func) \
7232	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
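/* For illustration, HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
 * sizeof(struct hci_ev_le_ltk_req)) expands to the designated initializer
 *
 *	[0x05] = { .func = hci_le_ltk_request_evt,
 *		   .min_len = sizeof(struct hci_ev_le_ltk_req),
 *		   .max_len = sizeof(struct hci_ev_le_ltk_req), },
 *
 * placing the handler at index HCI_EV_LE_LTK_REQ (0x05) of the table.
 */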
7233
7234/* Entries in this table shall be positioned according to the subevent
7235 * opcode they handle, so use of the macros above is recommended: they
7236 * initialize each entry at its proper index using designated
7237 * initializers, and events without a callback function can be omitted.
7238 */
7239static const struct hci_le_ev {
7240	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7241	u16  min_len;
7242	u16  max_len;
7243} hci_le_ev_table[U8_MAX + 1] = {
7244	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7245	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7246		  sizeof(struct hci_ev_le_conn_complete)),
7247	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7248	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7249		     sizeof(struct hci_ev_le_advertising_report),
7250		     HCI_MAX_EVENT_SIZE),
7251	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7252	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7253		  hci_le_conn_update_complete_evt,
7254		  sizeof(struct hci_ev_le_conn_update_complete)),
7255	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7256	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7257		  hci_le_remote_feat_complete_evt,
7258		  sizeof(struct hci_ev_le_remote_feat_complete)),
7259	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7260	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7261		  sizeof(struct hci_ev_le_ltk_req)),
7262	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7263	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7264		  hci_le_remote_conn_param_req_evt,
7265		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7266	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7267	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7268		  hci_le_enh_conn_complete_evt,
7269		  sizeof(struct hci_ev_le_enh_conn_complete)),
7270	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7271	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7272		     sizeof(struct hci_ev_le_direct_adv_report),
7273		     HCI_MAX_EVENT_SIZE),
7274	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7275	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7276		  sizeof(struct hci_ev_le_phy_update_complete)),
7277	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7278	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7279		     sizeof(struct hci_ev_le_ext_adv_report),
7280		     HCI_MAX_EVENT_SIZE),
7281	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7282	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7283		  hci_le_pa_sync_estabilished_evt,
7284		  sizeof(struct hci_ev_le_pa_sync_established)),
7285	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7286	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7287				 hci_le_per_adv_report_evt,
7288				 sizeof(struct hci_ev_le_per_adv_report),
7289				 HCI_MAX_EVENT_SIZE),
7290	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7291	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7292		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7293	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7294	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7295		  sizeof(struct hci_evt_le_cis_established)),
7296	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7297	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7298		  sizeof(struct hci_evt_le_cis_req)),
7299	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7300	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7301		     hci_le_create_big_complete_evt,
7302		     sizeof(struct hci_evt_le_create_big_complete),
7303		     HCI_MAX_EVENT_SIZE),
7304	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7305	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7306		     hci_le_big_sync_established_evt,
7307		     sizeof(struct hci_evt_le_big_sync_estabilished),
7308		     HCI_MAX_EVENT_SIZE),
7309	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7310	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7311		     hci_le_big_info_adv_report_evt,
7312		     sizeof(struct hci_evt_le_big_info_adv_report),
7313		     HCI_MAX_EVENT_SIZE),
7314};
7315
7316static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7317			    struct sk_buff *skb, u16 *opcode, u8 *status,
7318			    hci_req_complete_t *req_complete,
7319			    hci_req_complete_skb_t *req_complete_skb)
7320{
7321	struct hci_ev_le_meta *ev = data;
7322	const struct hci_le_ev *subev;
7323
7324	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7325
7326	/* Only match event if command OGF is for LE */
7327	if (hdev->sent_cmd &&
7328	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7329	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7330		*opcode = hci_skb_opcode(hdev->sent_cmd);
7331		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7332				     req_complete_skb);
7333	}
7334
7335	subev = &hci_le_ev_table[ev->subevent];
7336	if (!subev->func)
7337		return;
7338
7339	if (skb->len < subev->min_len) {
7340		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7341			   ev->subevent, skb->len, subev->min_len);
7342		return;
7343	}
7344
7345	/* Just warn if the length is over max_len, since it may still be
7346	 * possible to partially parse the event, so leave it to the
7347	 * callback to decide if that is acceptable.
7348	 */
7349	if (skb->len > subev->max_len)
7350		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7351			    ev->subevent, skb->len, subev->max_len);
7352	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7353	if (!data)
7354		return;
7355
7356	subev->func(hdev, data, skb);
7357}
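/* Dispatch sketch: an LE Meta event with subevent 0x01
 * (HCI_EV_LE_CONN_COMPLETE) is length-checked against
 * sizeof(struct hci_ev_le_conn_complete) and then routed to
 * hci_le_conn_complete_evt() via hci_le_ev_table[0x01] above.
 */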
7358
7359static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7360				 u8 event, struct sk_buff *skb)
7361{
7362	struct hci_ev_cmd_complete *ev;
7363	struct hci_event_hdr *hdr;
7364
7365	if (!skb)
7366		return false;
7367
7368	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7369	if (!hdr)
7370		return false;
7371
7372	if (event) {
7373		if (hdr->evt != event)
7374			return false;
7375		return true;
7376	}
7377
7378	/* Check if request ended in Command Status - no way to retrieve
7379	 * any extra parameters in this case.
7380	 */
7381	if (hdr->evt == HCI_EV_CMD_STATUS)
7382		return false;
7383
7384	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7385		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7386			   hdr->evt);
7387		return false;
7388	}
7389
7390	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7391	if (!ev)
7392		return false;
7393
7394	if (opcode != __le16_to_cpu(ev->opcode)) {
7395		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7396		       __le16_to_cpu(ev->opcode));
7397		return false;
7398	}
7399
7400	return true;
7401}
7402
7403static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7404				  struct sk_buff *skb)
7405{
7406	struct hci_ev_le_advertising_info *adv;
7407	struct hci_ev_le_direct_adv_info *direct_adv;
7408	struct hci_ev_le_ext_adv_info *ext_adv;
7409	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7410	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7411
7412	hci_dev_lock(hdev);
7413
7414	/* If we are currently suspended and this is the first BT event seen,
7415	 * save the wake reason associated with the event.
7416	 */
7417	if (!hdev->suspended || hdev->wake_reason)
7418		goto unlock;
7419
7420	/* Default to remote wake. Values for wake_reason are documented in the
7421	 * BlueZ mgmt API docs.
7422	 */
7423	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7424
7425	/* Once configured for remote wakeup, we should only wake up for
7426	 * reconnections. It's useful to see which device is waking us up so
7427	 * keep track of the bdaddr of the connection event that woke us up.
7428	 */
7429	if (event == HCI_EV_CONN_REQUEST) {
7430		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7431		hdev->wake_addr_type = BDADDR_BREDR;
7432	} else if (event == HCI_EV_CONN_COMPLETE) {
7433		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7434		hdev->wake_addr_type = BDADDR_BREDR;
7435	} else if (event == HCI_EV_LE_META) {
7436		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7437		u8 subevent = le_ev->subevent;
7438		u8 *ptr = &skb->data[sizeof(*le_ev)];
7439		u8 num_reports = *ptr;
7440
7441		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7442		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7443		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7444		    num_reports) {
7445			adv = (void *)(ptr + 1);
7446			direct_adv = (void *)(ptr + 1);
7447			ext_adv = (void *)(ptr + 1);
7448
7449			switch (subevent) {
7450			case HCI_EV_LE_ADVERTISING_REPORT:
7451				bacpy(&hdev->wake_addr, &adv->bdaddr);
7452				hdev->wake_addr_type = adv->bdaddr_type;
7453				break;
7454			case HCI_EV_LE_DIRECT_ADV_REPORT:
7455				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7456				hdev->wake_addr_type = direct_adv->bdaddr_type;
7457				break;
7458			case HCI_EV_LE_EXT_ADV_REPORT:
7459				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7460				hdev->wake_addr_type = ext_adv->bdaddr_type;
7461				break;
7462			}
7463		}
7464	} else {
7465		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7466	}
7467
7468unlock:
7469	hci_dev_unlock(hdev);
7470}
7471
7472#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7473[_op] = { \
7474	.req = false, \
7475	.func = _func, \
7476	.min_len = _min_len, \
7477	.max_len = _max_len, \
7478}
7479
7480#define HCI_EV(_op, _func, _len) \
7481	HCI_EV_VL(_op, _func, _len, _len)
7482
7483#define HCI_EV_STATUS(_op, _func) \
7484	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7485
7486#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7487[_op] = { \
7488	.req = true, \
7489	.func_req = _func, \
7490	.min_len = _min_len, \
7491	.max_len = _max_len, \
7492}
7493
7494#define HCI_EV_REQ(_op, _func, _len) \
7495	HCI_EV_REQ_VL(_op, _func, _len, _len)
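/* A note on the union below: HCI_EV_REQ()/HCI_EV_REQ_VL() set .req = true
 * and fill .func_req, whose extra opcode/status/completion arguments let
 * Command Complete/Status events resolve pending requests; the dispatcher
 * is expected to pick the union member based on .req, while plain
 * HCI_EV() handlers only receive hdev, data and skb.
 */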
7496
7497/* Entries in this table shall be positioned according to the event opcode
7498 * they handle, so use of the macros above is recommended: they initialize
7499 * each entry at its proper index using designated initializers, and
7500 * events without a callback function need no entry.
7501 */
7502static const struct hci_ev {
7503	bool req;
7504	union {
7505		void (*func)(struct hci_dev *hdev, void *data,
7506			     struct sk_buff *skb);
7507		void (*func_req)(struct hci_dev *hdev, void *data,
7508				 struct sk_buff *skb, u16 *opcode, u8 *status,
7509				 hci_req_complete_t *req_complete,
7510				 hci_req_complete_skb_t *req_complete_skb);
7511	};
7512	u16  min_len;
7513	u16  max_len;
7514} hci_ev_table[U8_MAX + 1] = {
7515	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7516	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7517	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7518	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7519		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7520	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7521	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7522	       sizeof(struct hci_ev_conn_complete)),
7523	/* [0x04 = HCI_EV_CONN_REQUEST] */
7524	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7525	       sizeof(struct hci_ev_conn_request)),
7526	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7527	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7528	       sizeof(struct hci_ev_disconn_complete)),
7529	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7530	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7531	       sizeof(struct hci_ev_auth_complete)),
7532	/* [0x07 = HCI_EV_REMOTE_NAME] */
7533	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7534	       sizeof(struct hci_ev_remote_name)),
7535	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7536	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7537	       sizeof(struct hci_ev_encrypt_change)),
7538	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7539	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7540	       hci_change_link_key_complete_evt,
7541	       sizeof(struct hci_ev_change_link_key_complete)),
7542	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7543	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7544	       sizeof(struct hci_ev_remote_features)),
7545	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7546	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7547		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7548	/* [0x0f = HCI_EV_CMD_STATUS] */
7549	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7550		   sizeof(struct hci_ev_cmd_status)),
7551	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7552	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7553	       sizeof(struct hci_ev_hardware_error)),
7554	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7555	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7556	       sizeof(struct hci_ev_role_change)),
7557	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7558	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7559		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7560	/* [0x14 = HCI_EV_MODE_CHANGE] */
7561	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7562	       sizeof(struct hci_ev_mode_change)),
7563	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7564	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7565	       sizeof(struct hci_ev_pin_code_req)),
7566	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7567	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7568	       sizeof(struct hci_ev_link_key_req)),
7569	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7570	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7571	       sizeof(struct hci_ev_link_key_notify)),
7572	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7573	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7574	       sizeof(struct hci_ev_clock_offset)),
7575	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7576	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7577	       sizeof(struct hci_ev_pkt_type_change)),
7578	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7579	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7580	       sizeof(struct hci_ev_pscan_rep_mode)),
7581	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7582	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7583		  hci_inquiry_result_with_rssi_evt,
7584		  sizeof(struct hci_ev_inquiry_result_rssi),
7585		  HCI_MAX_EVENT_SIZE),
7586	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7587	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7588	       sizeof(struct hci_ev_remote_ext_features)),
7589	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7590	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7591	       sizeof(struct hci_ev_sync_conn_complete)),
7592	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7593	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7594		  hci_extended_inquiry_result_evt,
7595		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7596	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7597	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7598	       sizeof(struct hci_ev_key_refresh_complete)),
7599	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7600	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7601	       sizeof(struct hci_ev_io_capa_request)),
7602	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7603	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7604	       sizeof(struct hci_ev_io_capa_reply)),
7605	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7606	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7607	       sizeof(struct hci_ev_user_confirm_req)),
7608	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7609	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7610	       sizeof(struct hci_ev_user_passkey_req)),
7611	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7612	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7613	       sizeof(struct hci_ev_remote_oob_data_request)),
7614	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7615	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7616	       sizeof(struct hci_ev_simple_pair_complete)),
7617	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7618	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7619	       sizeof(struct hci_ev_user_passkey_notify)),
7620	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7621	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7622	       sizeof(struct hci_ev_keypress_notify)),
7623	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7624	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7625	       sizeof(struct hci_ev_remote_host_features)),
7626	/* [0x3e = HCI_EV_LE_META] */
7627	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7628		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7629#if IS_ENABLED(CONFIG_BT_HS)
7630	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7631	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7632	       sizeof(struct hci_ev_phy_link_complete)),
7633	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7634	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7635	       sizeof(struct hci_ev_channel_selected)),
7636	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7637	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7638	       hci_disconn_loglink_complete_evt,
7639	       sizeof(struct hci_ev_disconn_logical_link_complete)),
7640	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7641	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7642	       sizeof(struct hci_ev_logical_link_complete)),
7643	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7644	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7645	       hci_disconn_phylink_complete_evt,
7646	       sizeof(struct hci_ev_disconn_phy_link_complete)),
7647#endif
7648	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7649	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7650	       sizeof(struct hci_ev_num_comp_blocks)),
7651	/* [0xff = HCI_EV_VENDOR] */
7652	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7653};
7654
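/* Each entry above maps an event code to its handler: HCI_EV() declares a
 * fixed minimum payload length, the *_VL variants accept variable-length
 * payloads up to the given maximum, and the *_REQ variants mark handlers
 * that take part in HCI request completion.
 */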
7655static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7656			   u16 *opcode, u8 *status,
7657			   hci_req_complete_t *req_complete,
7658			   hci_req_complete_skb_t *req_complete_skb)
7659{
7660	const struct hci_ev *ev = &hci_ev_table[event];
7661	void *data;
7662
7663	if (!ev->func)
7664		return;
7665
7666	if (skb->len < ev->min_len) {
7667		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7668			   event, skb->len, ev->min_len);
7669		return;
7670	}
7671
7672	/* Just warn if the length is over max_len, since it may still be
7673	 * possible to partially parse the event; leave it to the callback
7674	 * to decide whether that is acceptable.
7675	 */
7676	if (skb->len > ev->max_len)
7677		bt_dev_warn_ratelimited(hdev,
7678					"unexpected event 0x%2.2x length: %u > %u",
7679					event, skb->len, ev->max_len);
7680
7681	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7682	if (!data)
7683		return;
7684
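	/* Request-aware handlers receive the opcode/status slots and the
	 * completion callbacks so they can finish a pending HCI request;
	 * plain handlers only get the event data.
	 */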
7685	if (ev->req)
7686		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7687			     req_complete_skb);
7688	else
7689		ev->func(hdev, data, skb);
7690}
7691
7692void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7693{
7694	struct hci_event_hdr *hdr = (void *) skb->data;
7695	hci_req_complete_t req_complete = NULL;
7696	hci_req_complete_skb_t req_complete_skb = NULL;
7697	struct sk_buff *orig_skb = NULL;
7698	u8 status = 0, event, req_evt = 0;
7699	u16 opcode = HCI_OP_NOP;
7700
7701	if (skb->len < sizeof(*hdr)) {
7702		bt_dev_err(hdev, "Malformed HCI Event");
7703		goto done;
7704	}
7705
7706	kfree_skb(hdev->recv_event);
7707	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7708
7709	event = hdr->evt;
7710	if (!event) {
7711		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7712			    event);
7713		goto done;
7714	}
7715
7716	/* Only match the event if the command OGF is not for LE (0x08 is the LE Controller OGF) */
7717	if (hdev->sent_cmd &&
7718	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7719	    hci_skb_event(hdev->sent_cmd) == event) {
7720		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7721				     status, &req_complete, &req_complete_skb);
7722		req_evt = event;
7723	}
7724
7725	/* If it looks like we might end up having to call
7726	 * req_complete_skb, store a pristine copy of the skb since the
7727	 * various handlers may modify the original one through
7728	 * skb_pull() calls, etc.
7729	 */
7730	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7731	    event == HCI_EV_CMD_COMPLETE)
7732		orig_skb = skb_clone(skb, GFP_KERNEL);
7733
7734	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7735
7736	/* Store wake reason if we're suspended */
7737	hci_store_wake_reason(hdev, event, skb);
7738
7739	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7740
7741	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7742		       &req_complete_skb);
7743
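	/* Hand the pristine clone to the skb-based completion only when it
	 * matches the Command Complete we were waiting for; otherwise pass
	 * NULL so the callback knows no matching response was captured.
	 */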
7744	if (req_complete) {
7745		req_complete(hdev, status, opcode);
7746	} else if (req_complete_skb) {
7747		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7748			kfree_skb(orig_skb);
7749			orig_skb = NULL;
7750		}
7751		req_complete_skb(hdev, status, opcode, orig_skb);
7752	}
7753
7754done:
7755	kfree_skb(orig_skb);
7756	kfree_skb(skb);
7757	hdev->stat.evt_rx++;
7758}
v4.17
  25/* Bluetooth HCI event handling. */
  26
  27#include <asm/unaligned.h>
  28
  29#include <net/bluetooth/bluetooth.h>
  30#include <net/bluetooth/hci_core.h>
  31#include <net/bluetooth/mgmt.h>
  32
  33#include "hci_request.h"
  34#include "hci_debugfs.h"
  35#include "a2mp.h"
  36#include "amp.h"
  37#include "smp.h"
  38
  39#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
  40		 "\x00\x00\x00\x00\x00\x00\x00\x00"
  41
  42/* Handle HCI Event packets */
  43
  44static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
  45{
  46	__u8 status = *((__u8 *) skb->data);
  47
  48	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  49
  50	if (status)
  51		return;
  52
  53	clear_bit(HCI_INQUIRY, &hdev->flags);
  54	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
  55	wake_up_bit(&hdev->flags, HCI_INQUIRY);
  56
  57	hci_dev_lock(hdev);
  58	/* Set discovery state to stopped if we're not doing LE active
  59	 * scanning.
  60	 */
  61	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
  62	    hdev->le_scan_type != LE_SCAN_ACTIVE)
  63		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
  64	hci_dev_unlock(hdev);
  65
  66	hci_conn_check_pending(hdev);
  67}
  68
  69static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
  70{
  71	__u8 status = *((__u8 *) skb->data);
  72
  73	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  74
  75	if (status)
  76		return;
  77
  78	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
  79}
  80
  81static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
  82{
  83	__u8 status = *((__u8 *) skb->data);
  84
  85	BT_DBG("%s status 0x%2.2x", hdev->name, status);
  86
  87	if (status)
  88		return;
  89
  90	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
  91
  92	hci_conn_check_pending(hdev);
  93}
  94
  95static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
  96					  struct sk_buff *skb)
  97{
  98	BT_DBG("%s", hdev->name);
  99}
 100
 101static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
 102{
 103	struct hci_rp_role_discovery *rp = (void *) skb->data;
 104	struct hci_conn *conn;
 105
 106	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 107
 108	if (rp->status)
 109		return;
 110
 111	hci_dev_lock(hdev);
 112
 113	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 114	if (conn)
 115		conn->role = rp->role;
 116
 117	hci_dev_unlock(hdev);
 118}
 119
 120static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 121{
 122	struct hci_rp_read_link_policy *rp = (void *) skb->data;
 123	struct hci_conn *conn;
 124
 125	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 126
 127	if (rp->status)
 128		return;
 129
 130	hci_dev_lock(hdev);
 131
 132	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 133	if (conn)
 134		conn->link_policy = __le16_to_cpu(rp->policy);
 135
 136	hci_dev_unlock(hdev);
 137}
 138
 139static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 140{
 141	struct hci_rp_write_link_policy *rp = (void *) skb->data;
 142	struct hci_conn *conn;
 143	void *sent;
 144
 145	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 146
 147	if (rp->status)
 148		return;
 149
 150	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
 151	if (!sent)
 152		return;
 153
 154	hci_dev_lock(hdev);
 155
 156	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 157	if (conn)
 158		conn->link_policy = get_unaligned_le16(sent + 2);
 159
 160	hci_dev_unlock(hdev);
 161}
 162
 163static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
 164					struct sk_buff *skb)
 165{
 166	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
 167
 168	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 169
 170	if (rp->status)
 171		return;
 172
 173	hdev->link_policy = __le16_to_cpu(rp->policy);
 174}
 175
 176static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
 177					 struct sk_buff *skb)
 178{
 179	__u8 status = *((__u8 *) skb->data);
 180	void *sent;
 181
 182	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 183
 184	if (status)
 185		return;
 186
 187	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
 188	if (!sent)
 189		return;
 190
 191	hdev->link_policy = get_unaligned_le16(sent);
 192}
 193
 194static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
 195{
 196	__u8 status = *((__u8 *) skb->data);
 197
 198	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 199
 200	clear_bit(HCI_RESET, &hdev->flags);
 201
 202	if (status)
 203		return;
 204
 205	/* Reset all non-persistent flags */
 206	hci_dev_clear_volatile_flags(hdev);
 207
 208	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 209
 210	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
 211	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 212
 213	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
 214	hdev->adv_data_len = 0;
 215
 216	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
 217	hdev->scan_rsp_data_len = 0;
 218
 219	hdev->le_scan_type = LE_SCAN_PASSIVE;
 220
 221	hdev->ssp_debug_mode = 0;
 222
 223	hci_bdaddr_list_clear(&hdev->le_white_list);
 224}
 225
 226static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
 227					struct sk_buff *skb)
 228{
 229	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
 230	struct hci_cp_read_stored_link_key *sent;
 231
 232	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 233
 234	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
 235	if (!sent)
 236		return;
 237
 238	if (!rp->status && sent->read_all == 0x01) {
 239		hdev->stored_max_keys = rp->max_keys;
 240		hdev->stored_num_keys = rp->num_keys;
 241	}
 242}
 243
 244static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
 245					  struct sk_buff *skb)
 246{
 247	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
 248
 249	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 250
 251	if (rp->status)
 252		return;
 253
 254	if (rp->num_keys <= hdev->stored_num_keys)
 255		hdev->stored_num_keys -= rp->num_keys;
 256	else
 257		hdev->stored_num_keys = 0;
 258}
 259
 260static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 261{
 262	__u8 status = *((__u8 *) skb->data);
 263	void *sent;
 264
 265	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 266
 267	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
 268	if (!sent)
 269		return;
 270
 271	hci_dev_lock(hdev);
 272
 273	if (hci_dev_test_flag(hdev, HCI_MGMT))
 274		mgmt_set_local_name_complete(hdev, sent, status);
 275	else if (!status)
 276		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 277
 278	hci_dev_unlock(hdev);
 279}
 280
 281static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 282{
 283	struct hci_rp_read_local_name *rp = (void *) skb->data;
 284
 285	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 286
 287	if (rp->status)
 288		return;
 289
 290	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 291	    hci_dev_test_flag(hdev, HCI_CONFIG))
 292		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 293}
 294
 295static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
 296{
 297	__u8 status = *((__u8 *) skb->data);
 298	void *sent;
 299
 300	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 301
 302	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
 303	if (!sent)
 304		return;
 305
 306	hci_dev_lock(hdev);
 307
 308	if (!status) {
 309		__u8 param = *((__u8 *) sent);
 310
 311		if (param == AUTH_ENABLED)
 312			set_bit(HCI_AUTH, &hdev->flags);
 313		else
 314			clear_bit(HCI_AUTH, &hdev->flags);
 315	}
 316
 317	if (hci_dev_test_flag(hdev, HCI_MGMT))
 318		mgmt_auth_enable_complete(hdev, status);
 319
 320	hci_dev_unlock(hdev);
 321}
 322
 323static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 324{
 325	__u8 status = *((__u8 *) skb->data);
 326	__u8 param;
 327	void *sent;
 328
 329	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 330
 331	if (status)
 332		return;
 333
 334	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
 335	if (!sent)
 336		return;
 337
 338	param = *((__u8 *) sent);
 339
 340	if (param)
 341		set_bit(HCI_ENCRYPT, &hdev->flags);
 342	else
 343		clear_bit(HCI_ENCRYPT, &hdev->flags);
 344}
 345
 346static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 347{
 348	__u8 status = *((__u8 *) skb->data);
 349	__u8 param;
 350	void *sent;
 351
 352	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 353
 354	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 355	if (!sent)
 356		return;
 357
 358	param = *((__u8 *) sent);
 359
 360	hci_dev_lock(hdev);
 361
 362	if (status) {
 363		hdev->discov_timeout = 0;
 364		goto done;
 365	}
 366
 367	if (param & SCAN_INQUIRY)
 368		set_bit(HCI_ISCAN, &hdev->flags);
 369	else
 370		clear_bit(HCI_ISCAN, &hdev->flags);
 371
 372	if (param & SCAN_PAGE)
 373		set_bit(HCI_PSCAN, &hdev->flags);
 374	else
 375		clear_bit(HCI_PSCAN, &hdev->flags);
 376
 377done:
 378	hci_dev_unlock(hdev);
 379}
 380
 381static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
 382{
 383	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
 384
 385	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 386
 387	if (rp->status)
 388		return;
 389
 390	memcpy(hdev->dev_class, rp->dev_class, 3);
 391
 392	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
 393	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
 394}
 395
 396static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
 397{
 398	__u8 status = *((__u8 *) skb->data);
 399	void *sent;
 400
 401	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 402
 403	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
 404	if (!sent)
 405		return;
 406
 407	hci_dev_lock(hdev);
 408
 409	if (status == 0)
 410		memcpy(hdev->dev_class, sent, 3);
 411
 412	if (hci_dev_test_flag(hdev, HCI_MGMT))
 413		mgmt_set_class_of_dev_complete(hdev, sent, status);
 414
 415	hci_dev_unlock(hdev);
 416}
 417
 418static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
 419{
 420	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
 421	__u16 setting;
 422
 423	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 424
 425	if (rp->status)
 426		return;
 427
 428	setting = __le16_to_cpu(rp->voice_setting);
 429
 430	if (hdev->voice_setting == setting)
 431		return;
 432
 433	hdev->voice_setting = setting;
 434
 435	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
 436
 437	if (hdev->notify)
 438		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 439}
 440
 441static void hci_cc_write_voice_setting(struct hci_dev *hdev,
 442				       struct sk_buff *skb)
 443{
 444	__u8 status = *((__u8 *) skb->data);
 445	__u16 setting;
 446	void *sent;
 447
 448	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 449
 450	if (status)
 451		return;
 452
 453	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
 454	if (!sent)
 455		return;
 456
 457	setting = get_unaligned_le16(sent);
 458
 459	if (hdev->voice_setting == setting)
 460		return;
 461
 462	hdev->voice_setting = setting;
 463
 464	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
 465
 466	if (hdev->notify)
 467		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 468}
 469
 470static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
 471					  struct sk_buff *skb)
 472{
 473	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
 474
 475	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 476
 477	if (rp->status)
 478		return;
 479
 480	hdev->num_iac = rp->num_iac;
 481
 482	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
 483}
 484
 485static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
 486{
 487	__u8 status = *((__u8 *) skb->data);
 488	struct hci_cp_write_ssp_mode *sent;
 489
 490	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 491
 492	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
 493	if (!sent)
 494		return;
 495
 496	hci_dev_lock(hdev);
 497
 498	if (!status) {
 499		if (sent->mode)
 500			hdev->features[1][0] |= LMP_HOST_SSP;
 501		else
 502			hdev->features[1][0] &= ~LMP_HOST_SSP;
 503	}
 504
 505	if (hci_dev_test_flag(hdev, HCI_MGMT))
 506		mgmt_ssp_enable_complete(hdev, sent->mode, status);
 507	else if (!status) {
 508		if (sent->mode)
 509			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
 510		else
 511			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
 512	}
 513
 514	hci_dev_unlock(hdev);
 515}
 516
 517static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
 518{
 519	u8 status = *((u8 *) skb->data);
 520	struct hci_cp_write_sc_support *sent;
 521
 522	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 523
 524	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
 525	if (!sent)
 526		return;
 527
 528	hci_dev_lock(hdev);
 529
 530	if (!status) {
 531		if (sent->support)
 532			hdev->features[1][0] |= LMP_HOST_SC;
 533		else
 534			hdev->features[1][0] &= ~LMP_HOST_SC;
 535	}
 536
 537	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
 538		if (sent->support)
 539			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
 540		else
 541			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
 542	}
 543
 544	hci_dev_unlock(hdev);
 545}
 546
 547static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 548{
 549	struct hci_rp_read_local_version *rp = (void *) skb->data;
 550
 551	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 552
 553	if (rp->status)
 554		return;
 555
 556	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 557	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 558		hdev->hci_ver = rp->hci_ver;
 559		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
 560		hdev->lmp_ver = rp->lmp_ver;
 561		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
 562		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 563	}
 564}
 565
 566static void hci_cc_read_local_commands(struct hci_dev *hdev,
 567				       struct sk_buff *skb)
 568{
 569	struct hci_rp_read_local_commands *rp = (void *) skb->data;
 570
 571	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 572
 573	if (rp->status)
 574		return;
 575
 576	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 577	    hci_dev_test_flag(hdev, HCI_CONFIG))
 578		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 579}
 580
 581static void hci_cc_read_local_features(struct hci_dev *hdev,
 582				       struct sk_buff *skb)
 583{
 584	struct hci_rp_read_local_features *rp = (void *) skb->data;
 585
 586	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 587
 588	if (rp->status)
 589		return;
 590
 591	memcpy(hdev->features, rp->features, 8);
 592
593	/* Adjust default settings according to the features
594	 * supported by the device. */
 595
 596	if (hdev->features[0][0] & LMP_3SLOT)
 597		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
 598
 599	if (hdev->features[0][0] & LMP_5SLOT)
 600		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
 601
 602	if (hdev->features[0][1] & LMP_HV2) {
 603		hdev->pkt_type  |= (HCI_HV2);
 604		hdev->esco_type |= (ESCO_HV2);
 605	}
 606
 607	if (hdev->features[0][1] & LMP_HV3) {
 608		hdev->pkt_type  |= (HCI_HV3);
 609		hdev->esco_type |= (ESCO_HV3);
 610	}
 611
 612	if (lmp_esco_capable(hdev))
 613		hdev->esco_type |= (ESCO_EV3);
 614
 615	if (hdev->features[0][4] & LMP_EV4)
 616		hdev->esco_type |= (ESCO_EV4);
 617
 618	if (hdev->features[0][4] & LMP_EV5)
 619		hdev->esco_type |= (ESCO_EV5);
 620
 621	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
 622		hdev->esco_type |= (ESCO_2EV3);
 623
 624	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
 625		hdev->esco_type |= (ESCO_3EV3);
 626
 627	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
 628		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
 629}
 630
 631static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
 632					   struct sk_buff *skb)
 633{
 634	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
 635
 636	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 637
 638	if (rp->status)
 639		return;
 640
 641	if (hdev->max_page < rp->max_page)
 642		hdev->max_page = rp->max_page;
 643
 644	if (rp->page < HCI_MAX_PAGES)
 645		memcpy(hdev->features[rp->page], rp->features, 8);
 646}
 647
 648static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
 649					  struct sk_buff *skb)
 650{
 651	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
 652
 653	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 654
 655	if (rp->status)
 656		return;
 657
 658	hdev->flow_ctl_mode = rp->mode;
 659}
 660
 661static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 662{
 663	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
 664
 665	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 666
 667	if (rp->status)
 668		return;
 669
 670	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
 671	hdev->sco_mtu  = rp->sco_mtu;
 672	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
 673	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
 674
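	/* Some controllers report unusable SCO buffer settings; the
	 * HCI_QUIRK_FIXUP_BUFFER_SIZE quirk substitutes known-good values.
	 */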
 675	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
 676		hdev->sco_mtu  = 64;
 677		hdev->sco_pkts = 8;
 678	}
 679
 680	hdev->acl_cnt = hdev->acl_pkts;
 681	hdev->sco_cnt = hdev->sco_pkts;
 682
 683	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
 684	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
 685}
 686
 687static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
 688{
 689	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
 690
 691	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 692
 693	if (rp->status)
 694		return;
 695
 696	if (test_bit(HCI_INIT, &hdev->flags))
 697		bacpy(&hdev->bdaddr, &rp->bdaddr);
 698
 699	if (hci_dev_test_flag(hdev, HCI_SETUP))
 700		bacpy(&hdev->setup_addr, &rp->bdaddr);
 701}
 702
 703static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
 704					   struct sk_buff *skb)
 705{
 706	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
 707
 708	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 709
 710	if (rp->status)
 711		return;
 712
 713	if (test_bit(HCI_INIT, &hdev->flags)) {
 714		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
 715		hdev->page_scan_window = __le16_to_cpu(rp->window);
 716	}
 717}
 718
 719static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
 720					    struct sk_buff *skb)
 721{
 722	u8 status = *((u8 *) skb->data);
 723	struct hci_cp_write_page_scan_activity *sent;
 724
 725	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 726
 727	if (status)
 728		return;
 729
 730	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
 731	if (!sent)
 732		return;
 733
 734	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
 735	hdev->page_scan_window = __le16_to_cpu(sent->window);
 736}
 737
 738static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
 739					   struct sk_buff *skb)
 740{
 741	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
 742
 743	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 744
 745	if (rp->status)
 746		return;
 747
 748	if (test_bit(HCI_INIT, &hdev->flags))
 749		hdev->page_scan_type = rp->type;
 750}
 751
 752static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
 753					struct sk_buff *skb)
 754{
 755	u8 status = *((u8 *) skb->data);
 756	u8 *type;
 757
 758	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 759
 760	if (status)
 761		return;
 762
 763	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
 764	if (type)
 765		hdev->page_scan_type = *type;
 766}
 767
 768static void hci_cc_read_data_block_size(struct hci_dev *hdev,
 769					struct sk_buff *skb)
 770{
 771	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
 772
 773	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 774
 775	if (rp->status)
 776		return;
 777
 778	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
 779	hdev->block_len = __le16_to_cpu(rp->block_len);
 780	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
 781
 782	hdev->block_cnt = hdev->num_blocks;
 783
 784	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
 785	       hdev->block_cnt, hdev->block_len);
 786}
 787
 788static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
 789{
 790	struct hci_rp_read_clock *rp = (void *) skb->data;
 791	struct hci_cp_read_clock *cp;
 792	struct hci_conn *conn;
 793
 794	BT_DBG("%s", hdev->name);
 795
 796	if (skb->len < sizeof(*rp))
 797		return;
 798
 799	if (rp->status)
 800		return;
 801
 802	hci_dev_lock(hdev);
 803
 804	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
 805	if (!cp)
 806		goto unlock;
 807
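	/* Which_Clock 0x00 asks for the local Bluetooth clock; any other
	 * value refers to the piconet clock of the given connection handle.
	 */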
 808	if (cp->which == 0x00) {
 809		hdev->clock = le32_to_cpu(rp->clock);
 810		goto unlock;
 811	}
 812
 813	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
 814	if (conn) {
 815		conn->clock = le32_to_cpu(rp->clock);
 816		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
 817	}
 818
 819unlock:
 820	hci_dev_unlock(hdev);
 821}
 822
 823static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
 824				       struct sk_buff *skb)
 825{
 826	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
 827
 828	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 829
 830	if (rp->status)
 831		return;
 832
 833	hdev->amp_status = rp->amp_status;
 834	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
 835	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
 836	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
 837	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
 838	hdev->amp_type = rp->amp_type;
 839	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
 840	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
 841	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
 842	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
 843}
 844
 845static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
 846					 struct sk_buff *skb)
 847{
 848	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
 849
 850	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 851
 852	if (rp->status)
 853		return;
 854
 855	hdev->inq_tx_power = rp->tx_power;
 856}
 857
 858static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
 859{
 860	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
 861	struct hci_cp_pin_code_reply *cp;
 862	struct hci_conn *conn;
 863
 864	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 865
 866	hci_dev_lock(hdev);
 867
 868	if (hci_dev_test_flag(hdev, HCI_MGMT))
 869		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 870
 871	if (rp->status)
 872		goto unlock;
 873
 874	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
 875	if (!cp)
 876		goto unlock;
 877
 878	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 879	if (conn)
 880		conn->pin_length = cp->pin_len;
 881
 882unlock:
 883	hci_dev_unlock(hdev);
 884}
 885
 886static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 887{
 888	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
 889
 890	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 891
 892	hci_dev_lock(hdev);
 893
 894	if (hci_dev_test_flag(hdev, HCI_MGMT))
 895		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
 896						 rp->status);
 897
 898	hci_dev_unlock(hdev);
 899}
 900
 901static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
 902				       struct sk_buff *skb)
 903{
 904	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
 905
 906	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 907
 908	if (rp->status)
 909		return;
 910
 911	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
 912	hdev->le_pkts = rp->le_max_pkt;
 913
 914	hdev->le_cnt = hdev->le_pkts;
 915
 916	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
 917}
 918
 919static void hci_cc_le_read_local_features(struct hci_dev *hdev,
 920					  struct sk_buff *skb)
 921{
 922	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
 923
 924	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 925
 926	if (rp->status)
 927		return;
 928
 929	memcpy(hdev->le_features, rp->features, 8);
 930}
 931
 932static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
 933					struct sk_buff *skb)
 934{
 935	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
 936
 937	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 938
 939	if (rp->status)
 940		return;
 941
 942	hdev->adv_tx_power = rp->tx_power;
 943}
 944
 945static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 946{
 947	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 948
 949	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 950
 951	hci_dev_lock(hdev);
 952
 953	if (hci_dev_test_flag(hdev, HCI_MGMT))
 954		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
 955						 rp->status);
 956
 957	hci_dev_unlock(hdev);
 958}
 959
 960static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 961					  struct sk_buff *skb)
 962{
 963	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 964
 965	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 966
 967	hci_dev_lock(hdev);
 968
 969	if (hci_dev_test_flag(hdev, HCI_MGMT))
 970		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
 971						     ACL_LINK, 0, rp->status);
 972
 973	hci_dev_unlock(hdev);
 974}
 975
 976static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
 977{
 978	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 979
 980	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 981
 982	hci_dev_lock(hdev);
 983
 984	if (hci_dev_test_flag(hdev, HCI_MGMT))
 985		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
 986						 0, rp->status);
 987
 988	hci_dev_unlock(hdev);
 989}
 990
 991static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
 992					  struct sk_buff *skb)
 993{
 994	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 995
 996	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 997
 998	hci_dev_lock(hdev);
 999
1000	if (hci_dev_test_flag(hdev, HCI_MGMT))
1001		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1002						     ACL_LINK, 0, rp->status);
1003
1004	hci_dev_unlock(hdev);
1005}
1006
1007static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1008				       struct sk_buff *skb)
1009{
1010	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1011
1012	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1013}
1014
1015static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1016					   struct sk_buff *skb)
1017{
1018	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1019
1020	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1021}
1022
1023static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1024{
1025	__u8 status = *((__u8 *) skb->data);
1026	bdaddr_t *sent;
1027
1028	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1029
1030	if (status)
1031		return;
1032
1033	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1034	if (!sent)
1035		return;
1036
1037	hci_dev_lock(hdev);
1038
1039	bacpy(&hdev->random_addr, sent);
1040
1041	hci_dev_unlock(hdev);
1042}
1043
1044static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1045{
1046	__u8 *sent, status = *((__u8 *) skb->data);
1047
1048	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1049
1050	if (status)
1051		return;
1052
1053	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1054	if (!sent)
1055		return;
1056
1057	hci_dev_lock(hdev);
1058
1059	/* If we're doing connection initiation as peripheral, set a
1060	 * timeout in case something goes wrong.
1061	 */
1062	if (*sent) {
1063		struct hci_conn *conn;
1064
1065		hci_dev_set_flag(hdev, HCI_LE_ADV);
1066
1067		conn = hci_lookup_le_connect(hdev);
1068		if (conn)
1069			queue_delayed_work(hdev->workqueue,
1070					   &conn->le_conn_timeout,
1071					   conn->conn_timeout);
1072	} else {
1073		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1074	}
1075
1076	hci_dev_unlock(hdev);
1077}
1078
1079static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1080{
1081	struct hci_cp_le_set_scan_param *cp;
1082	__u8 status = *((__u8 *) skb->data);
1083
1084	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1085
1086	if (status)
1087		return;
1088
1089	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1090	if (!cp)
1091		return;
1092
1093	hci_dev_lock(hdev);
1094
1095	hdev->le_scan_type = cp->type;
1096
1097	hci_dev_unlock(hdev);
1098}
1099
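/* A last_adv_addr different from BDADDR_ANY means an advertising report
 * is buffered in the discovery state and has not been sent to mgmt yet.
 */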
1100static bool has_pending_adv_report(struct hci_dev *hdev)
1101{
1102	struct discovery_state *d = &hdev->discovery;
1103
1104	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1105}
1106
1107static void clear_pending_adv_report(struct hci_dev *hdev)
1108{
1109	struct discovery_state *d = &hdev->discovery;
1110
1111	bacpy(&d->last_adv_addr, BDADDR_ANY);
1112	d->last_adv_data_len = 0;
1113}
1114
1115static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1116				     u8 bdaddr_type, s8 rssi, u32 flags,
1117				     u8 *data, u8 len)
1118{
1119	struct discovery_state *d = &hdev->discovery;
1120
1121	bacpy(&d->last_adv_addr, bdaddr);
1122	d->last_adv_addr_type = bdaddr_type;
1123	d->last_adv_rssi = rssi;
1124	d->last_adv_flags = flags;
1125	memcpy(d->last_adv_data, data, len);
1126	d->last_adv_data_len = len;
1127}
1128
1129static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1130				      struct sk_buff *skb)
1131{
1132	struct hci_cp_le_set_scan_enable *cp;
1133	__u8 status = *((__u8 *) skb->data);
1134
1135	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1136
1137	if (status)
1138		return;
1139
1140	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1141	if (!cp)
1142		return;
1143
1144	hci_dev_lock(hdev);
1145
1146	switch (cp->enable) {
1147	case LE_SCAN_ENABLE:
1148		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1149		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1150			clear_pending_adv_report(hdev);
1151		break;
1152
1153	case LE_SCAN_DISABLE:
1154		/* We do this here instead of when setting DISCOVERY_STOPPED
1155		 * since the latter would potentially require waiting for
1156		 * inquiry to stop too.
1157		 */
1158		if (has_pending_adv_report(hdev)) {
1159			struct discovery_state *d = &hdev->discovery;
1160
1161			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1162					  d->last_adv_addr_type, NULL,
1163					  d->last_adv_rssi, d->last_adv_flags,
1164					  d->last_adv_data,
1165					  d->last_adv_data_len, NULL, 0);
1166		}
1167
1168		/* Cancel this timer so that we don't try to disable scanning
1169		 * when it's already disabled.
1170		 */
1171		cancel_delayed_work(&hdev->le_scan_disable);
1172
1173		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1174
1175	/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1176	 * interrupted scanning due to a connect request, so mark
1177	 * discovery as stopped. If this was not because of a
1178	 * connect request, advertising might have been disabled
1179	 * because of active scanning, so re-enable it again if
1180	 * necessary.
1181	 */
1182		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1183			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1184		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1185			 hdev->discovery.state == DISCOVERY_FINDING)
1186			hci_req_reenable_advertising(hdev);
1187
1188		break;
1189
1190	default:
1191		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1192			   cp->enable);
1193		break;
1194	}
1195
1196	hci_dev_unlock(hdev);
1197}
1198
1199static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1200					   struct sk_buff *skb)
1201{
1202	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1203
1204	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1205
1206	if (rp->status)
1207		return;
1208
1209	hdev->le_white_list_size = rp->size;
1210}
1211
1212static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1213				       struct sk_buff *skb)
1214{
1215	__u8 status = *((__u8 *) skb->data);
1216
1217	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1218
1219	if (status)
1220		return;
1221
1222	hci_bdaddr_list_clear(&hdev->le_white_list);
1223}
1224
1225static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1226					struct sk_buff *skb)
1227{
1228	struct hci_cp_le_add_to_white_list *sent;
1229	__u8 status = *((__u8 *) skb->data);
1230
1231	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1232
1233	if (status)
1234		return;
1235
1236	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1237	if (!sent)
1238		return;
1239
1240	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1241			   sent->bdaddr_type);
1242}
1243
1244static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1245					  struct sk_buff *skb)
1246{
1247	struct hci_cp_le_del_from_white_list *sent;
1248	__u8 status = *((__u8 *) skb->data);
1249
1250	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1251
1252	if (status)
1253		return;
1254
1255	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1256	if (!sent)
1257		return;
1258
1259	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1260			    sent->bdaddr_type);
1261}
1262
1263static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1264					    struct sk_buff *skb)
1265{
1266	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1267
1268	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1269
1270	if (rp->status)
1271		return;
1272
1273	memcpy(hdev->le_states, rp->le_states, 8);
1274}
1275
1276static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1277					struct sk_buff *skb)
1278{
1279	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1280
1281	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1282
1283	if (rp->status)
1284		return;
1285
1286	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1287	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1288}
1289
1290static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1291					 struct sk_buff *skb)
1292{
1293	struct hci_cp_le_write_def_data_len *sent;
1294	__u8 status = *((__u8 *) skb->data);
1295
1296	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1297
1298	if (status)
1299		return;
1300
1301	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1302	if (!sent)
1303		return;
1304
1305	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1306	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1307}
1308
1309static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1310					struct sk_buff *skb)
1311{
1312	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1313
1314	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1315
1316	if (rp->status)
1317		return;
1318
1319	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1320	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1321	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1322	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1323}
1324
1325static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1326					   struct sk_buff *skb)
1327{
1328	struct hci_cp_write_le_host_supported *sent;
1329	__u8 status = *((__u8 *) skb->data);
1330
1331	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1332
1333	if (status)
1334		return;
1335
1336	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1337	if (!sent)
1338		return;
1339
1340	hci_dev_lock(hdev);
1341
1342	if (sent->le) {
1343		hdev->features[1][0] |= LMP_HOST_LE;
1344		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1345	} else {
1346		hdev->features[1][0] &= ~LMP_HOST_LE;
1347		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1348		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1349	}
1350
1351	if (sent->simul)
1352		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1353	else
1354		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1355
1356	hci_dev_unlock(hdev);
1357}
1358
1359static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1360{
1361	struct hci_cp_le_set_adv_param *cp;
1362	u8 status = *((u8 *) skb->data);
1363
1364	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1365
1366	if (status)
1367		return;
1368
1369	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1370	if (!cp)
1371		return;
1372
1373	hci_dev_lock(hdev);
1374	hdev->adv_addr_type = cp->own_address_type;
1375	hci_dev_unlock(hdev);
1376}
1377
1378static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1379{
1380	struct hci_rp_read_rssi *rp = (void *) skb->data;
1381	struct hci_conn *conn;
1382
1383	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1384
1385	if (rp->status)
1386		return;
1387
1388	hci_dev_lock(hdev);
1389
1390	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1391	if (conn)
1392		conn->rssi = rp->rssi;
1393
1394	hci_dev_unlock(hdev);
1395}
1396
1397static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1398{
1399	struct hci_cp_read_tx_power *sent;
1400	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1401	struct hci_conn *conn;
1402
1403	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1404
1405	if (rp->status)
1406		return;
1407
1408	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1409	if (!sent)
1410		return;
1411
1412	hci_dev_lock(hdev);
1413
1414	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1415	if (!conn)
1416		goto unlock;
1417
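	/* Read_Transmit_Power_Level type: 0x00 reads the current transmit
	 * power, 0x01 the maximum transmit power for the connection.
	 */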
1418	switch (sent->type) {
1419	case 0x00:
1420		conn->tx_power = rp->tx_power;
1421		break;
1422	case 0x01:
1423		conn->max_tx_power = rp->tx_power;
1424		break;
1425	}
1426
1427unlock:
1428	hci_dev_unlock(hdev);
1429}
1430
1431static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1432{
1433	u8 status = *((u8 *) skb->data);
1434	u8 *mode;
1435
1436	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1437
1438	if (status)
1439		return;
1440
1441	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1442	if (mode)
1443		hdev->ssp_debug_mode = *mode;
1444}
1445
1446static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1447{
1448	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1449
1450	if (status) {
1451		hci_conn_check_pending(hdev);
1452		return;
1453	}
1454
1455	set_bit(HCI_INQUIRY, &hdev->flags);
1456}
1457
1458static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1459{
1460	struct hci_cp_create_conn *cp;
1461	struct hci_conn *conn;
1462
1463	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1464
1465	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1466	if (!cp)
1467		return;
1468
1469	hci_dev_lock(hdev);
1470
1471	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1472
1473	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1474
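	/* Status 0x0c (Command Disallowed) on an early attempt parks the
	 * connection in BT_CONNECT2 so it can be retried; any other failure
	 * (or too many attempts) tears the connection down.
	 */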
1475	if (status) {
1476		if (conn && conn->state == BT_CONNECT) {
1477			if (status != 0x0c || conn->attempt > 2) {
1478				conn->state = BT_CLOSED;
1479				hci_connect_cfm(conn, status);
1480				hci_conn_del(conn);
1481			} else
1482				conn->state = BT_CONNECT2;
1483		}
1484	} else {
1485		if (!conn) {
1486			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1487					    HCI_ROLE_MASTER);
1488			if (!conn)
1489				bt_dev_err(hdev, "no memory for new connection");
1490		}
1491	}
1492
1493	hci_dev_unlock(hdev);
1494}
1495
1496static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1497{
1498	struct hci_cp_add_sco *cp;
1499	struct hci_conn *acl, *sco;
1500	__u16 handle;
1501
1502	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1503
1504	if (!status)
1505		return;
1506
1507	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1508	if (!cp)
1509		return;
1510
1511	handle = __le16_to_cpu(cp->handle);
1512
1513	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1514
1515	hci_dev_lock(hdev);
1516
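	/* Add SCO failed: find the parent ACL and abort the SCO link that
	 * was being set up on top of it.
	 */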
1517	acl = hci_conn_hash_lookup_handle(hdev, handle);
1518	if (acl) {
1519		sco = acl->link;
1520		if (sco) {
1521			sco->state = BT_CLOSED;
1522
1523			hci_connect_cfm(sco, status);
1524			hci_conn_del(sco);
1525		}
1526	}
1527
1528	hci_dev_unlock(hdev);
1529}
1530
1531static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1532{
1533	struct hci_cp_auth_requested *cp;
1534	struct hci_conn *conn;
1535
1536	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1537
1538	if (!status)
1539		return;
1540
1541	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1542	if (!cp)
1543		return;
1544
1545	hci_dev_lock(hdev);
1546
1547	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1548	if (conn) {
1549		if (conn->state == BT_CONFIG) {
1550			hci_connect_cfm(conn, status);
1551			hci_conn_drop(conn);
1552		}
1553	}
1554
1555	hci_dev_unlock(hdev);
1556}
1557
1558static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1559{
1560	struct hci_cp_set_conn_encrypt *cp;
1561	struct hci_conn *conn;
1562
1563	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1564
1565	if (!status)
1566		return;
1567
1568	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1569	if (!cp)
1570		return;
1571
1572	hci_dev_lock(hdev);
1573
1574	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1575	if (conn) {
1576		if (conn->state == BT_CONFIG) {
1577			hci_connect_cfm(conn, status);
1578			hci_conn_drop(conn);
1579		}
1580	}
1581
1582	hci_dev_unlock(hdev);
1583}
1584
1585static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1586				    struct hci_conn *conn)
1587{
1588	if (conn->state != BT_CONFIG || !conn->out)
1589		return 0;
1590
1591	if (conn->pending_sec_level == BT_SECURITY_SDP)
1592		return 0;
1593
1594	/* Only request authentication for SSP connections or non-SSP
1595	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1596	 * is requested.
1597	 */
1598	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1599	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1600	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1601	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1602		return 0;
1603
1604	return 1;
1605}
1606
1607static int hci_resolve_name(struct hci_dev *hdev,
1608				   struct inquiry_entry *e)
1609{
1610	struct hci_cp_remote_name_req cp;
1611
1612	memset(&cp, 0, sizeof(cp));
1613
1614	bacpy(&cp.bdaddr, &e->data.bdaddr);
1615	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1616	cp.pscan_mode = e->data.pscan_mode;
1617	cp.clock_offset = e->data.clock_offset;
1618
1619	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1620}
1621
1622static bool hci_resolve_next_name(struct hci_dev *hdev)
1623{
1624	struct discovery_state *discov = &hdev->discovery;
1625	struct inquiry_entry *e;
1626
1627	if (list_empty(&discov->resolve))
1628		return false;
1629
1630	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1631	if (!e)
1632		return false;
1633
1634	if (hci_resolve_name(hdev, e) == 0) {
1635		e->name_state = NAME_PENDING;
1636		return true;
1637	}
1638
1639	return false;
1640}
1641
1642static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1643				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1644{
1645	struct discovery_state *discov = &hdev->discovery;
1646	struct inquiry_entry *e;
1647
1648	/* Update the mgmt connected state if necessary. Be careful with
1649	 * conn objects that exist but are not (yet) connected however.
1650	 * Only those in BT_CONFIG or BT_CONNECTED states can be
1651	 * considered connected.
1652	 */
1653	if (conn &&
1654	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1655	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1656		mgmt_device_connected(hdev, conn, 0, name, name_len);
1657
1658	if (discov->state == DISCOVERY_STOPPED)
1659		return;
1660
1661	if (discov->state == DISCOVERY_STOPPING)
1662		goto discov_complete;
1663
1664	if (discov->state != DISCOVERY_RESOLVING)
1665		return;
1666
1667	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1668	/* If the device was not found in the list of devices whose names
1669	 * are pending, there is no need to continue resolving the next name,
1670	 * as that will be done upon receiving another Remote Name Request
1671	 * Complete event. */
1672	if (!e)
1673		return;
1674
1675	list_del(&e->list);
1676	if (name) {
1677		e->name_state = NAME_KNOWN;
1678		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1679				 e->data.rssi, name, name_len);
1680	} else {
1681		e->name_state = NAME_NOT_KNOWN;
1682	}
1683
1684	if (hci_resolve_next_name(hdev))
1685		return;
1686
1687discov_complete:
1688	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1689}
1690
1691static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1692{
1693	struct hci_cp_remote_name_req *cp;
1694	struct hci_conn *conn;
1695
1696	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1697
1698	/* If successful, wait for the name req complete event before
1699	 * checking whether authentication is needed. */
1700	if (!status)
1701		return;
1702
1703	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1704	if (!cp)
1705		return;
1706
1707	hci_dev_lock(hdev);
1708
1709	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1710
1711	if (hci_dev_test_flag(hdev, HCI_MGMT))
1712		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1713
1714	if (!conn)
1715		goto unlock;
1716
1717	if (!hci_outgoing_auth_needed(hdev, conn))
1718		goto unlock;
1719
1720	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1721		struct hci_cp_auth_requested auth_cp;
1722
1723		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1724
1725		auth_cp.handle = __cpu_to_le16(conn->handle);
1726		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1727			     sizeof(auth_cp), &auth_cp);
1728	}
1729
1730unlock:
1731	hci_dev_unlock(hdev);
1732}
1733
1734static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1735{
1736	struct hci_cp_read_remote_features *cp;
1737	struct hci_conn *conn;
1738
1739	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1740
1741	if (!status)
1742		return;
1743
1744	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1745	if (!cp)
1746		return;
1747
1748	hci_dev_lock(hdev);
1749
1750	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1751	if (conn) {
1752		if (conn->state == BT_CONFIG) {
1753			hci_connect_cfm(conn, status);
1754			hci_conn_drop(conn);
1755		}
1756	}
1757
1758	hci_dev_unlock(hdev);
1759}
1760
1761static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1762{
1763	struct hci_cp_read_remote_ext_features *cp;
1764	struct hci_conn *conn;
1765
1766	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1767
1768	if (!status)
1769		return;
1770
1771	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1772	if (!cp)
1773		return;
1774
1775	hci_dev_lock(hdev);
1776
1777	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1778	if (conn) {
1779		if (conn->state == BT_CONFIG) {
1780			hci_connect_cfm(conn, status);
1781			hci_conn_drop(conn);
1782		}
1783	}
1784
1785	hci_dev_unlock(hdev);
1786}
1787
1788static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1789{
1790	struct hci_cp_setup_sync_conn *cp;
1791	struct hci_conn *acl, *sco;
1792	__u16 handle;
1793
1794	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1795
1796	if (!status)
1797		return;
1798
1799	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1800	if (!cp)
1801		return;
1802
1803	handle = __le16_to_cpu(cp->handle);
1804
1805	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1806
1807	hci_dev_lock(hdev);
1808
1809	acl = hci_conn_hash_lookup_handle(hdev, handle);
1810	if (acl) {
1811		sco = acl->link;
1812		if (sco) {
1813			sco->state = BT_CLOSED;
1814
1815			hci_connect_cfm(sco, status);
1816			hci_conn_del(sco);
1817		}
1818	}
1819
1820	hci_dev_unlock(hdev);
1821}
1822
1823static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1824{
1825	struct hci_cp_sniff_mode *cp;
1826	struct hci_conn *conn;
1827
1828	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1829
1830	if (!status)
1831		return;
1832
1833	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1834	if (!cp)
1835		return;
1836
1837	hci_dev_lock(hdev);
1838
1839	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1840	if (conn) {
1841		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1842
1843		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1844			hci_sco_setup(conn, status);
1845	}
1846
1847	hci_dev_unlock(hdev);
1848}
1849
1850static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1851{
1852	struct hci_cp_exit_sniff_mode *cp;
1853	struct hci_conn *conn;
1854
1855	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1856
1857	if (!status)
1858		return;
1859
1860	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1861	if (!cp)
1862		return;
1863
1864	hci_dev_lock(hdev);
1865
1866	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1867	if (conn) {
1868		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1869
1870		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1871			hci_sco_setup(conn, status);
1872	}
1873
1874	hci_dev_unlock(hdev);
1875}
1876
1877static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1878{
1879	struct hci_cp_disconnect *cp;
1880	struct hci_conn *conn;
1881
1882	if (!status)
1883		return;
1884
1885	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1886	if (!cp)
1887		return;
1888
1889	hci_dev_lock(hdev);
1890
1891	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1892	if (conn)
1893		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1894				       conn->dst_type, status);
1895
1896	hci_dev_unlock(hdev);
1897}
1898
1899static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1900{
1901	struct hci_cp_le_create_conn *cp;
1902	struct hci_conn *conn;
1903
1904	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1905
1906	/* All connection failure handling is taken care of by the
1907	 * hci_le_conn_failed function, which is triggered by the HCI
1908	 * request completion callbacks used for connecting.
1909	 */
1910	if (status)
1911		return;
1912
1913	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1914	if (!cp)
1915		return;
1916
1917	hci_dev_lock(hdev);
1918
1919	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
1920				       cp->peer_addr_type);
1921	if (!conn)
1922		goto unlock;
1923
1924	/* Store the initiator and responder address information which
1925	 * is needed for SMP. These values will not change during the
1926	 * lifetime of the connection.
1927	 */
1928	conn->init_addr_type = cp->own_address_type;
1929	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1930		bacpy(&conn->init_addr, &hdev->random_addr);
1931	else
1932		bacpy(&conn->init_addr, &hdev->bdaddr);
1933
1934	conn->resp_addr_type = cp->peer_addr_type;
1935	bacpy(&conn->resp_addr, &cp->peer_addr);
1936
1937	/* We don't want the connection attempt to stick around
1938	 * indefinitely since LE doesn't have a page timeout concept
1939	 * like BR/EDR. Set a timer for any connection that doesn't use
1940	 * the white list for connecting.
1941	 */
1942	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1943		queue_delayed_work(conn->hdev->workqueue,
1944				   &conn->le_conn_timeout,
1945				   conn->conn_timeout);
1946
1947unlock:
1948	hci_dev_unlock(hdev);
1949}
1950
1951static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1952{
1953	struct hci_cp_le_read_remote_features *cp;
1954	struct hci_conn *conn;
1955
1956	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1957
1958	if (!status)
1959		return;
1960
1961	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1962	if (!cp)
1963		return;
1964
1965	hci_dev_lock(hdev);
1966
1967	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1968	if (conn) {
1969		if (conn->state == BT_CONFIG) {
1970			hci_connect_cfm(conn, status);
1971			hci_conn_drop(conn);
1972		}
1973	}
1974
1975	hci_dev_unlock(hdev);
1976}
1977
1978static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1979{
1980	struct hci_cp_le_start_enc *cp;
1981	struct hci_conn *conn;
1982
1983	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1984
1985	if (!status)
1986		return;
1987
1988	hci_dev_lock(hdev);
1989
1990	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1991	if (!cp)
1992		goto unlock;
1993
1994	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1995	if (!conn)
1996		goto unlock;
1997
1998	if (conn->state != BT_CONNECTED)
1999		goto unlock;
2000
2001	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2002	hci_conn_drop(conn);
2003
2004unlock:
2005	hci_dev_unlock(hdev);
2006}
2007
2008static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2009{
2010	struct hci_cp_switch_role *cp;
2011	struct hci_conn *conn;
2012
2013	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2014
2015	if (!status)
2016		return;
2017
2018	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2019	if (!cp)
2020		return;
2021
2022	hci_dev_lock(hdev);
2023
2024	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2025	if (conn)
2026		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2027
2028	hci_dev_unlock(hdev);
2029}
2030
2031static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2032{
2033	__u8 status = *((__u8 *) skb->data);
2034	struct discovery_state *discov = &hdev->discovery;
2035	struct inquiry_entry *e;
2036
2037	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2038
2039	hci_conn_check_pending(hdev);
2040
2041	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2042		return;
2043
2044	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2045	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2046
2047	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2048		return;
2049
2050	hci_dev_lock(hdev);
2051
2052	if (discov->state != DISCOVERY_FINDING)
2053		goto unlock;
2054
2055	if (list_empty(&discov->resolve)) {
2056		/* When BR/EDR inquiry is active and no LE scanning is in
2057		 * progress, then change discovery state to indicate completion.
2058		 *
2059		 * When running LE scanning and BR/EDR inquiry simultaneously
2060		 * and the LE scan already finished, then change the discovery
2061		 * state to indicate completion.
2062		 */
2063		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2064		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2065			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2066		goto unlock;
2067	}
2068
2069	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2070	if (e && hci_resolve_name(hdev, e) == 0) {
2071		e->name_state = NAME_PENDING;
2072		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2073	} else {
2074		/* When BR/EDR inquiry is active and no LE scanning is in
2075		 * progress, then change discovery state to indicate completion.
2076		 *
2077		 * When running LE scanning and BR/EDR inquiry simultaneously
2078		 * and the LE scan already finished, then change the discovery
2079		 * state to indicate completion.
2080		 */
2081		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2082		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2083			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2084	}
2085
2086unlock:
2087	hci_dev_unlock(hdev);
2088}
2089
2090static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2091{
2092	struct inquiry_data data;
2093	struct inquiry_info *info = (void *) (skb->data + 1);
2094	int num_rsp = *((__u8 *) skb->data);
2095
2096	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2097
2098	if (!num_rsp)
2099		return;
2100
2101	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2102		return;
2103
2104	hci_dev_lock(hdev);
2105
2106	for (; num_rsp; num_rsp--, info++) {
2107		u32 flags;
2108
2109		bacpy(&data.bdaddr, &info->bdaddr);
2110		data.pscan_rep_mode	= info->pscan_rep_mode;
2111		data.pscan_period_mode	= info->pscan_period_mode;
2112		data.pscan_mode		= info->pscan_mode;
2113		memcpy(data.dev_class, info->dev_class, 3);
2114		data.clock_offset	= info->clock_offset;
2115		data.rssi		= HCI_RSSI_INVALID;
2116		data.ssp_mode		= 0x00;
2117
2118		flags = hci_inquiry_cache_update(hdev, &data, false);
2119
2120		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2121				  info->dev_class, HCI_RSSI_INVALID,
2122				  flags, NULL, 0, NULL, 0);
2123	}
2124
2125	hci_dev_unlock(hdev);
2126}
2127
2128static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2129{
2130	struct hci_ev_conn_complete *ev = (void *) skb->data;
2131	struct hci_conn *conn;
2132
2133	BT_DBG("%s", hdev->name);
2134
2135	hci_dev_lock(hdev);
2136
2137	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2138	if (!conn) {
2139		if (ev->link_type != SCO_LINK)
2140			goto unlock;
2141
2142		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2143		if (!conn)
2144			goto unlock;
2145
2146		conn->type = SCO_LINK;
2147	}
2148
2149	if (!ev->status) {
2150		conn->handle = __le16_to_cpu(ev->handle);
2151
2152		if (conn->type == ACL_LINK) {
2153			conn->state = BT_CONFIG;
2154			hci_conn_hold(conn);
2155
2156			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2157			    !hci_find_link_key(hdev, &ev->bdaddr))
2158				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2159			else
2160				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2161		} else
2162			conn->state = BT_CONNECTED;
2163
2164		hci_debugfs_create_conn(conn);
2165		hci_conn_add_sysfs(conn);
2166
2167		if (test_bit(HCI_AUTH, &hdev->flags))
2168			set_bit(HCI_CONN_AUTH, &conn->flags);
2169
2170		if (test_bit(HCI_ENCRYPT, &hdev->flags))
2171			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2172
2173		/* Get remote features */
2174		if (conn->type == ACL_LINK) {
2175			struct hci_cp_read_remote_features cp;
2176			cp.handle = ev->handle;
2177			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2178				     sizeof(cp), &cp);
2179
2180			hci_req_update_scan(hdev);
2181		}
2182
2183		/* Set packet type for incoming connection */
2184		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2185			struct hci_cp_change_conn_ptype cp;
2186			cp.handle = ev->handle;
2187			cp.pkt_type = cpu_to_le16(conn->pkt_type);
2188			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2189				     &cp);
2190		}
2191	} else {
2192		conn->state = BT_CLOSED;
2193		if (conn->type == ACL_LINK)
2194			mgmt_connect_failed(hdev, &conn->dst, conn->type,
2195					    conn->dst_type, ev->status);
2196	}
2197
2198	if (conn->type == ACL_LINK)
2199		hci_sco_setup(conn, ev->status);
2200
2201	if (ev->status) {
2202		hci_connect_cfm(conn, ev->status);
2203		hci_conn_del(conn);
2204	} else if (ev->link_type != ACL_LINK)
2205		hci_connect_cfm(conn, ev->status);
2206
2207unlock:
2208	hci_dev_unlock(hdev);
2209
2210	hci_conn_check_pending(hdev);
2211}
2212
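/* Reject an incoming connection with reason "Unacceptable BD_ADDR".
 * This is used for peers that are blocked or whose connection attempt
 * is otherwise not acceptable to the host.
 */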
2213static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2214{
2215	struct hci_cp_reject_conn_req cp;
2216
2217	bacpy(&cp.bdaddr, bdaddr);
2218	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2219	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2220}
2221
2222static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2223{
2224	struct hci_ev_conn_request *ev = (void *) skb->data;
2225	int mask = hdev->link_mode;
2226	struct inquiry_entry *ie;
2227	struct hci_conn *conn;
2228	__u8 flags = 0;
2229
2230	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2231	       ev->link_type);
2232
2233	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2234				      &flags);
2235
2236	if (!(mask & HCI_LM_ACCEPT)) {
2237		hci_reject_conn(hdev, &ev->bdaddr);
2238		return;
2239	}
2240
2241	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2242				   BDADDR_BREDR)) {
2243		hci_reject_conn(hdev, &ev->bdaddr);
2244		return;
2245	}
2246
2247	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
2248	 * connection. These features are only touched through mgmt so
2249	 * only do the checks if HCI_MGMT is set.
2250	 */
2251	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2252	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2253	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2254				    BDADDR_BREDR)) {
2255		hci_reject_conn(hdev, &ev->bdaddr);
2256		return;
2257	}
2258
2259	/* Connection accepted */
2260
2261	hci_dev_lock(hdev);
2262
2263	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2264	if (ie)
2265		memcpy(ie->data.dev_class, ev->dev_class, 3);
2266
2267	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2268			&ev->bdaddr);
2269	if (!conn) {
2270		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2271				    HCI_ROLE_SLAVE);
2272		if (!conn) {
2273			bt_dev_err(hdev, "no memory for new connection");
2274			hci_dev_unlock(hdev);
2275			return;
2276		}
2277	}
2278
2279	memcpy(conn->dev_class, ev->dev_class, 3);
2280
2281	hci_dev_unlock(hdev);
2282
2283	if (ev->link_type == ACL_LINK ||
2284	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2285		struct hci_cp_accept_conn_req cp;
2286		conn->state = BT_CONNECT;
2287
2288		bacpy(&cp.bdaddr, &ev->bdaddr);
2289
2290		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2291			cp.role = 0x00; /* Become master */
2292		else
2293			cp.role = 0x01; /* Remain slave */
2294
2295		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2296	} else if (!(flags & HCI_PROTO_DEFER)) {
2297		struct hci_cp_accept_sync_conn_req cp;
2298		conn->state = BT_CONNECT;
2299
2300		bacpy(&cp.bdaddr, &ev->bdaddr);
2301		cp.pkt_type = cpu_to_le16(conn->pkt_type);
2302
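		/* 0x1f40 is 8000 bytes per second in each direction,
		 * i.e. the 64 kb/s needed for voice transmission.
		 */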
2303		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
2304		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
2305		cp.max_latency    = cpu_to_le16(0xffff);
2306		cp.content_format = cpu_to_le16(hdev->voice_setting);
2307		cp.retrans_effort = 0xff;
2308
2309		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2310			     &cp);
2311	} else {
2312		conn->state = BT_CONNECT2;
2313		hci_connect_cfm(conn, 0);
2314	}
2315}
2316
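/* Map an HCI disconnect reason code to the disconnect reason that is
 * reported to user space through the management interface.
 */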
2317static u8 hci_to_mgmt_reason(u8 err)
2318{
2319	switch (err) {
2320	case HCI_ERROR_CONNECTION_TIMEOUT:
2321		return MGMT_DEV_DISCONN_TIMEOUT;
2322	case HCI_ERROR_REMOTE_USER_TERM:
2323	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2324	case HCI_ERROR_REMOTE_POWER_OFF:
2325		return MGMT_DEV_DISCONN_REMOTE;
2326	case HCI_ERROR_LOCAL_HOST_TERM:
2327		return MGMT_DEV_DISCONN_LOCAL_HOST;
2328	default:
2329		return MGMT_DEV_DISCONN_UNKNOWN;
2330	}
2331}
2332
2333static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2334{
2335	struct hci_ev_disconn_complete *ev = (void *) skb->data;
2336	u8 reason;
2337	struct hci_conn_params *params;
2338	struct hci_conn *conn;
2339	bool mgmt_connected;
2340	u8 type;
2341
2342	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2343
2344	hci_dev_lock(hdev);
2345
2346	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2347	if (!conn)
2348		goto unlock;
2349
2350	if (ev->status) {
2351		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2352				       conn->dst_type, ev->status);
2353		goto unlock;
2354	}
2355
2356	conn->state = BT_CLOSED;
2357
2358	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2359
2360	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2361		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2362	else
2363		reason = hci_to_mgmt_reason(ev->reason);
2364
2365	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2366				reason, mgmt_connected);
2367
2368	if (conn->type == ACL_LINK) {
2369		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2370			hci_remove_link_key(hdev, &conn->dst);
2371
2372		hci_req_update_scan(hdev);
2373	}
2374
2375	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2376	if (params) {
2377		switch (params->auto_connect) {
2378		case HCI_AUTO_CONN_LINK_LOSS:
2379			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2380				break;
2381			/* Fall through */
2382
2383		case HCI_AUTO_CONN_DIRECT:
2384		case HCI_AUTO_CONN_ALWAYS:
2385			list_del_init(&params->action);
2386			list_add(&params->action, &hdev->pend_le_conns);
2387			hci_update_background_scan(hdev);
2388			break;
2389
2390		default:
2391			break;
2392		}
2393	}
2394
2395	type = conn->type;
2396
2397	hci_disconn_cfm(conn, ev->reason);
2398	hci_conn_del(conn);
2399
2400	/* Re-enable advertising if necessary, since it might
2401	 * have been disabled by the connection. From the
2402	 * HCI_LE_Set_Advertise_Enable command description in
2403	 * the core specification (v4.0):
2404	 * "The Controller shall continue advertising until the Host
2405	 * issues an LE_Set_Advertise_Enable command with
2406	 * Advertising_Enable set to 0x00 (Advertising is disabled)
2407	 * or until a connection is created or until the Advertising
2408	 * is timed out due to Directed Advertising."
2409	 */
2410	if (type == LE_LINK)
2411		hci_req_reenable_advertising(hdev);
2412
2413unlock:
2414	hci_dev_unlock(hdev);
2415}
2416
2417static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2418{
2419	struct hci_ev_auth_complete *ev = (void *) skb->data;
2420	struct hci_conn *conn;
2421
2422	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2423
2424	hci_dev_lock(hdev);
2425
2426	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2427	if (!conn)
2428		goto unlock;
2429
2430	if (!ev->status) {
2431		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2432
2433		if (!hci_conn_ssp_enabled(conn) &&
2434		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2435			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2436		} else {
2437			set_bit(HCI_CONN_AUTH, &conn->flags);
2438			conn->sec_level = conn->pending_sec_level;
2439		}
2440	} else {
2441		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2442			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2443
2444		mgmt_auth_failed(conn, ev->status);
2445	}
2446
2447	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2448	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2449
2450	if (conn->state == BT_CONFIG) {
2451		if (!ev->status && hci_conn_ssp_enabled(conn)) {
2452			struct hci_cp_set_conn_encrypt cp;
2453			cp.handle  = ev->handle;
2454			cp.encrypt = 0x01;
2455			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2456				     &cp);
2457		} else {
2458			conn->state = BT_CONNECTED;
2459			hci_connect_cfm(conn, ev->status);
2460			hci_conn_drop(conn);
2461		}
2462	} else {
2463		hci_auth_cfm(conn, ev->status);
2464
2465		hci_conn_hold(conn);
2466		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2467		hci_conn_drop(conn);
2468	}
2469
2470	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2471		if (!ev->status) {
2472			struct hci_cp_set_conn_encrypt cp;
2473			cp.handle  = ev->handle;
2474			cp.encrypt = 0x01;
2475			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2476				     &cp);
2477		} else {
2478			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2479			hci_encrypt_cfm(conn, ev->status, 0x00);
2480		}
2481	}
2482
2483unlock:
2484	hci_dev_unlock(hdev);
2485}
2486
2487static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2488{
2489	struct hci_ev_remote_name *ev = (void *) skb->data;
2490	struct hci_conn *conn;
2491
2492	BT_DBG("%s", hdev->name);
2493
2494	hci_conn_check_pending(hdev);
2495
2496	hci_dev_lock(hdev);
2497
2498	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2499
2500	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2501		goto check_auth;
2502
2503	if (ev->status == 0)
2504		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2505				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2506	else
2507		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2508
2509check_auth:
2510	if (!conn)
2511		goto unlock;
2512
2513	if (!hci_outgoing_auth_needed(hdev, conn))
2514		goto unlock;
2515
2516	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2517		struct hci_cp_auth_requested cp;
2518
2519		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2520
2521		cp.handle = __cpu_to_le16(conn->handle);
2522		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2523	}
2524
2525unlock:
2526	hci_dev_unlock(hdev);
2527}
2528
2529static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2530				       u16 opcode, struct sk_buff *skb)
2531{
2532	const struct hci_rp_read_enc_key_size *rp;
2533	struct hci_conn *conn;
2534	u16 handle;
2535
2536	BT_DBG("%s status 0x%02x", hdev->name, status);
2537
2538	if (!skb || skb->len < sizeof(*rp)) {
2539		bt_dev_err(hdev, "invalid read key size response");
2540		return;
2541	}
2542
2543	rp = (void *)skb->data;
2544	handle = le16_to_cpu(rp->handle);
2545
2546	hci_dev_lock(hdev);
2547
2548	conn = hci_conn_hash_lookup_handle(hdev, handle);
2549	if (!conn)
2550		goto unlock;
2551
2552	/* If we fail to read the encryption key size, assume the maximum
2553	 * (which is also what we do when this HCI command isn't
2554	 * supported).
2555	 */
2556	if (rp->status) {
2557		bt_dev_err(hdev, "failed to read key size for handle %u",
2558			   handle);
2559		conn->enc_key_size = HCI_LINK_KEY_SIZE;
2560	} else {
2561		conn->enc_key_size = rp->key_size;
2562	}
2563
2564	if (conn->state == BT_CONFIG) {
2565		conn->state = BT_CONNECTED;
2566		hci_connect_cfm(conn, 0);
2567		hci_conn_drop(conn);
2568	} else {
2569		u8 encrypt;
2570
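		/* Notify the encryption level to the upper layers:
		 * 0x00 when not encrypted, 0x02 when AES-CCM is in use
		 * and 0x01 otherwise.
		 */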
2571		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2572			encrypt = 0x00;
2573		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2574			encrypt = 0x02;
2575		else
2576			encrypt = 0x01;
2577
2578		hci_encrypt_cfm(conn, 0, encrypt);
2579	}
2580
2581unlock:
2582	hci_dev_unlock(hdev);
2583}
2584
2585static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2586{
2587	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2588	struct hci_conn *conn;
2589
2590	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2591
2592	hci_dev_lock(hdev);
2593
2594	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2595	if (!conn)
2596		goto unlock;
2597
2598	if (!ev->status) {
2599		if (ev->encrypt) {
2600			/* Encryption implies authentication */
2601			set_bit(HCI_CONN_AUTH, &conn->flags);
2602			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2603			conn->sec_level = conn->pending_sec_level;
2604
2605			/* P-256 authentication key implies FIPS */
2606			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2607				set_bit(HCI_CONN_FIPS, &conn->flags);
2608
2609			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2610			    conn->type == LE_LINK)
2611				set_bit(HCI_CONN_AES_CCM, &conn->flags);
2612		} else {
2613			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2614			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2615		}
2616	}
2617
2618	/* We should disregard the current RPA and generate a new one
2619	 * whenever the encryption procedure fails.
2620	 */
2621	if (ev->status && conn->type == LE_LINK)
2622		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2623
2624	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2625
2626	if (ev->status && conn->state == BT_CONNECTED) {
2627		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2628			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2629
2630		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2631		hci_conn_drop(conn);
2632		goto unlock;
2633	}
2634
2635	/* In Secure Connections Only mode, do not allow any connections
2636	 * that are not encrypted with AES-CCM using a P-256 authenticated
2637	 * combination key.
2638	 */
2639	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2640	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2641	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2642		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2643		hci_conn_drop(conn);
2644		goto unlock;
2645	}
2646
2647	/* Try reading the encryption key size for encrypted ACL links */
2648	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2649		struct hci_cp_read_enc_key_size cp;
2650		struct hci_request req;
2651
2652		/* Only send HCI_Read_Encryption_Key_Size if the
2653		 * controller really supports it. If it doesn't, assume
2654		 * the default size (16).
2655		 */
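		/* Bit 4 of octet 20 in the supported commands bitmask
		 * indicates support for HCI_Read_Encryption_Key_Size.
		 */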
2656		if (!(hdev->commands[20] & 0x10)) {
2657			conn->enc_key_size = HCI_LINK_KEY_SIZE;
2658			goto notify;
2659		}
2660
2661		hci_req_init(&req, hdev);
2662
2663		cp.handle = cpu_to_le16(conn->handle);
2664		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2665
2666		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2667			bt_dev_err(hdev, "sending read key size failed");
2668			conn->enc_key_size = HCI_LINK_KEY_SIZE;
2669			goto notify;
2670		}
2671
2672		goto unlock;
2673	}
2674
2675notify:
2676	if (conn->state == BT_CONFIG) {
2677		if (!ev->status)
2678			conn->state = BT_CONNECTED;
2679
2680		hci_connect_cfm(conn, ev->status);
2681		hci_conn_drop(conn);
2682	} else
2683		hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2684
2685unlock:
2686	hci_dev_unlock(hdev);
2687}
2688
2689static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2690					     struct sk_buff *skb)
2691{
2692	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2693	struct hci_conn *conn;
2694
2695	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2696
2697	hci_dev_lock(hdev);
2698
2699	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2700	if (conn) {
2701		if (!ev->status)
2702			set_bit(HCI_CONN_SECURE, &conn->flags);
2703
2704		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2705
2706		hci_key_change_cfm(conn, ev->status);
2707	}
2708
2709	hci_dev_unlock(hdev);
2710}
2711
2712static void hci_remote_features_evt(struct hci_dev *hdev,
2713				    struct sk_buff *skb)
2714{
2715	struct hci_ev_remote_features *ev = (void *) skb->data;
2716	struct hci_conn *conn;
2717
2718	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2719
2720	hci_dev_lock(hdev);
2721
2722	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2723	if (!conn)
2724		goto unlock;
2725
2726	if (!ev->status)
2727		memcpy(conn->features[0], ev->features, 8);
2728
2729	if (conn->state != BT_CONFIG)
2730		goto unlock;
2731
2732	if (!ev->status && lmp_ext_feat_capable(hdev) &&
2733	    lmp_ext_feat_capable(conn)) {
2734		struct hci_cp_read_remote_ext_features cp;
2735		cp.handle = ev->handle;
2736		cp.page = 0x01;
2737		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2738			     sizeof(cp), &cp);
2739		goto unlock;
2740	}
2741
2742	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2743		struct hci_cp_remote_name_req cp;
2744		memset(&cp, 0, sizeof(cp));
2745		bacpy(&cp.bdaddr, &conn->dst);
2746		cp.pscan_rep_mode = 0x02;
2747		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2748	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2749		mgmt_device_connected(hdev, conn, 0, NULL, 0);
2750
2751	if (!hci_outgoing_auth_needed(hdev, conn)) {
2752		conn->state = BT_CONNECTED;
2753		hci_connect_cfm(conn, ev->status);
2754		hci_conn_drop(conn);
2755	}
2756
2757unlock:
2758	hci_dev_unlock(hdev);
2759}
2760
2761static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2762				 u16 *opcode, u8 *status,
2763				 hci_req_complete_t *req_complete,
2764				 hci_req_complete_skb_t *req_complete_skb)
2765{
2766	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2767
2768	*opcode = __le16_to_cpu(ev->opcode);
2769	*status = skb->data[sizeof(*ev)];
2770
2771	skb_pull(skb, sizeof(*ev));
2772
2773	switch (*opcode) {
2774	case HCI_OP_INQUIRY_CANCEL:
2775		hci_cc_inquiry_cancel(hdev, skb);
2776		break;
2777
2778	case HCI_OP_PERIODIC_INQ:
2779		hci_cc_periodic_inq(hdev, skb);
2780		break;
2781
2782	case HCI_OP_EXIT_PERIODIC_INQ:
2783		hci_cc_exit_periodic_inq(hdev, skb);
2784		break;
2785
2786	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2787		hci_cc_remote_name_req_cancel(hdev, skb);
2788		break;
2789
2790	case HCI_OP_ROLE_DISCOVERY:
2791		hci_cc_role_discovery(hdev, skb);
2792		break;
2793
2794	case HCI_OP_READ_LINK_POLICY:
2795		hci_cc_read_link_policy(hdev, skb);
2796		break;
2797
2798	case HCI_OP_WRITE_LINK_POLICY:
2799		hci_cc_write_link_policy(hdev, skb);
2800		break;
2801
2802	case HCI_OP_READ_DEF_LINK_POLICY:
2803		hci_cc_read_def_link_policy(hdev, skb);
2804		break;
2805
2806	case HCI_OP_WRITE_DEF_LINK_POLICY:
2807		hci_cc_write_def_link_policy(hdev, skb);
2808		break;
2809
2810	case HCI_OP_RESET:
2811		hci_cc_reset(hdev, skb);
2812		break;
2813
2814	case HCI_OP_READ_STORED_LINK_KEY:
2815		hci_cc_read_stored_link_key(hdev, skb);
2816		break;
2817
2818	case HCI_OP_DELETE_STORED_LINK_KEY:
2819		hci_cc_delete_stored_link_key(hdev, skb);
2820		break;
2821
2822	case HCI_OP_WRITE_LOCAL_NAME:
2823		hci_cc_write_local_name(hdev, skb);
2824		break;
2825
2826	case HCI_OP_READ_LOCAL_NAME:
2827		hci_cc_read_local_name(hdev, skb);
2828		break;
2829
2830	case HCI_OP_WRITE_AUTH_ENABLE:
2831		hci_cc_write_auth_enable(hdev, skb);
2832		break;
2833
2834	case HCI_OP_WRITE_ENCRYPT_MODE:
2835		hci_cc_write_encrypt_mode(hdev, skb);
2836		break;
2837
2838	case HCI_OP_WRITE_SCAN_ENABLE:
2839		hci_cc_write_scan_enable(hdev, skb);
2840		break;
2841
2842	case HCI_OP_READ_CLASS_OF_DEV:
2843		hci_cc_read_class_of_dev(hdev, skb);
2844		break;
2845
2846	case HCI_OP_WRITE_CLASS_OF_DEV:
2847		hci_cc_write_class_of_dev(hdev, skb);
2848		break;
2849
2850	case HCI_OP_READ_VOICE_SETTING:
2851		hci_cc_read_voice_setting(hdev, skb);
2852		break;
2853
2854	case HCI_OP_WRITE_VOICE_SETTING:
2855		hci_cc_write_voice_setting(hdev, skb);
2856		break;
2857
2858	case HCI_OP_READ_NUM_SUPPORTED_IAC:
2859		hci_cc_read_num_supported_iac(hdev, skb);
2860		break;
2861
2862	case HCI_OP_WRITE_SSP_MODE:
2863		hci_cc_write_ssp_mode(hdev, skb);
2864		break;
2865
2866	case HCI_OP_WRITE_SC_SUPPORT:
2867		hci_cc_write_sc_support(hdev, skb);
2868		break;
2869
2870	case HCI_OP_READ_LOCAL_VERSION:
2871		hci_cc_read_local_version(hdev, skb);
2872		break;
2873
2874	case HCI_OP_READ_LOCAL_COMMANDS:
2875		hci_cc_read_local_commands(hdev, skb);
2876		break;
2877
2878	case HCI_OP_READ_LOCAL_FEATURES:
2879		hci_cc_read_local_features(hdev, skb);
2880		break;
2881
2882	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2883		hci_cc_read_local_ext_features(hdev, skb);
2884		break;
2885
2886	case HCI_OP_READ_BUFFER_SIZE:
2887		hci_cc_read_buffer_size(hdev, skb);
2888		break;
2889
2890	case HCI_OP_READ_BD_ADDR:
2891		hci_cc_read_bd_addr(hdev, skb);
2892		break;
2893
2894	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2895		hci_cc_read_page_scan_activity(hdev, skb);
2896		break;
2897
2898	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2899		hci_cc_write_page_scan_activity(hdev, skb);
2900		break;
2901
2902	case HCI_OP_READ_PAGE_SCAN_TYPE:
2903		hci_cc_read_page_scan_type(hdev, skb);
2904		break;
2905
2906	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2907		hci_cc_write_page_scan_type(hdev, skb);
2908		break;
2909
2910	case HCI_OP_READ_DATA_BLOCK_SIZE:
2911		hci_cc_read_data_block_size(hdev, skb);
2912		break;
2913
2914	case HCI_OP_READ_FLOW_CONTROL_MODE:
2915		hci_cc_read_flow_control_mode(hdev, skb);
2916		break;
2917
2918	case HCI_OP_READ_LOCAL_AMP_INFO:
2919		hci_cc_read_local_amp_info(hdev, skb);
2920		break;
2921
2922	case HCI_OP_READ_CLOCK:
2923		hci_cc_read_clock(hdev, skb);
2924		break;
2925
2926	case HCI_OP_READ_INQ_RSP_TX_POWER:
2927		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2928		break;
2929
2930	case HCI_OP_PIN_CODE_REPLY:
2931		hci_cc_pin_code_reply(hdev, skb);
2932		break;
2933
2934	case HCI_OP_PIN_CODE_NEG_REPLY:
2935		hci_cc_pin_code_neg_reply(hdev, skb);
2936		break;
2937
2938	case HCI_OP_READ_LOCAL_OOB_DATA:
2939		hci_cc_read_local_oob_data(hdev, skb);
2940		break;
2941
2942	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2943		hci_cc_read_local_oob_ext_data(hdev, skb);
2944		break;
2945
2946	case HCI_OP_LE_READ_BUFFER_SIZE:
2947		hci_cc_le_read_buffer_size(hdev, skb);
2948		break;
2949
2950	case HCI_OP_LE_READ_LOCAL_FEATURES:
2951		hci_cc_le_read_local_features(hdev, skb);
2952		break;
2953
2954	case HCI_OP_LE_READ_ADV_TX_POWER:
2955		hci_cc_le_read_adv_tx_power(hdev, skb);
2956		break;
2957
2958	case HCI_OP_USER_CONFIRM_REPLY:
2959		hci_cc_user_confirm_reply(hdev, skb);
2960		break;
2961
2962	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2963		hci_cc_user_confirm_neg_reply(hdev, skb);
2964		break;
2965
2966	case HCI_OP_USER_PASSKEY_REPLY:
2967		hci_cc_user_passkey_reply(hdev, skb);
2968		break;
2969
2970	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2971		hci_cc_user_passkey_neg_reply(hdev, skb);
2972		break;
2973
2974	case HCI_OP_LE_SET_RANDOM_ADDR:
2975		hci_cc_le_set_random_addr(hdev, skb);
2976		break;
2977
2978	case HCI_OP_LE_SET_ADV_ENABLE:
2979		hci_cc_le_set_adv_enable(hdev, skb);
2980		break;
2981
2982	case HCI_OP_LE_SET_SCAN_PARAM:
2983		hci_cc_le_set_scan_param(hdev, skb);
2984		break;
2985
2986	case HCI_OP_LE_SET_SCAN_ENABLE:
2987		hci_cc_le_set_scan_enable(hdev, skb);
2988		break;
2989
2990	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2991		hci_cc_le_read_white_list_size(hdev, skb);
2992		break;
2993
2994	case HCI_OP_LE_CLEAR_WHITE_LIST:
2995		hci_cc_le_clear_white_list(hdev, skb);
2996		break;
2997
2998	case HCI_OP_LE_ADD_TO_WHITE_LIST:
2999		hci_cc_le_add_to_white_list(hdev, skb);
3000		break;
3001
3002	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3003		hci_cc_le_del_from_white_list(hdev, skb);
3004		break;
3005
3006	case HCI_OP_LE_READ_SUPPORTED_STATES:
3007		hci_cc_le_read_supported_states(hdev, skb);
3008		break;
3009
3010	case HCI_OP_LE_READ_DEF_DATA_LEN:
3011		hci_cc_le_read_def_data_len(hdev, skb);
3012		break;
3013
3014	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3015		hci_cc_le_write_def_data_len(hdev, skb);
3016		break;
3017
3018	case HCI_OP_LE_READ_MAX_DATA_LEN:
3019		hci_cc_le_read_max_data_len(hdev, skb);
3020		break;
3021
3022	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3023		hci_cc_write_le_host_supported(hdev, skb);
3024		break;
3025
3026	case HCI_OP_LE_SET_ADV_PARAM:
3027		hci_cc_set_adv_param(hdev, skb);
3028		break;
3029
3030	case HCI_OP_READ_RSSI:
3031		hci_cc_read_rssi(hdev, skb);
3032		break;
3033
3034	case HCI_OP_READ_TX_POWER:
3035		hci_cc_read_tx_power(hdev, skb);
3036		break;
3037
3038	case HCI_OP_WRITE_SSP_DEBUG_MODE:
3039		hci_cc_write_ssp_debug_mode(hdev, skb);
3040		break;
3041
3042	default:
3043		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3044		break;
3045	}
3046
3047	if (*opcode != HCI_OP_NOP)
3048		cancel_delayed_work(&hdev->cmd_timer);
3049
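	/* A non-zero Num_HCI_Command_Packets value means the controller
	 * can accept another command. The core keeps at most one command
	 * in flight, so a single credit is enough; while a reset is
	 * pending no new credit is handed out.
	 */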
3050	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3051		atomic_set(&hdev->cmd_cnt, 1);
3052
3053	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3054			     req_complete_skb);
3055
3056	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3057		queue_work(hdev->workqueue, &hdev->cmd_work);
3058}
3059
3060static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3061			       u16 *opcode, u8 *status,
3062			       hci_req_complete_t *req_complete,
3063			       hci_req_complete_skb_t *req_complete_skb)
3064{
3065	struct hci_ev_cmd_status *ev = (void *) skb->data;
3066
3067	skb_pull(skb, sizeof(*ev));
3068
3069	*opcode = __le16_to_cpu(ev->opcode);
3070	*status = ev->status;
3071
3072	switch (*opcode) {
3073	case HCI_OP_INQUIRY:
3074		hci_cs_inquiry(hdev, ev->status);
3075		break;
3076
3077	case HCI_OP_CREATE_CONN:
3078		hci_cs_create_conn(hdev, ev->status);
3079		break;
3080
3081	case HCI_OP_DISCONNECT:
3082		hci_cs_disconnect(hdev, ev->status);
3083		break;
3084
3085	case HCI_OP_ADD_SCO:
3086		hci_cs_add_sco(hdev, ev->status);
3087		break;
3088
3089	case HCI_OP_AUTH_REQUESTED:
3090		hci_cs_auth_requested(hdev, ev->status);
3091		break;
3092
3093	case HCI_OP_SET_CONN_ENCRYPT:
3094		hci_cs_set_conn_encrypt(hdev, ev->status);
3095		break;
3096
3097	case HCI_OP_REMOTE_NAME_REQ:
3098		hci_cs_remote_name_req(hdev, ev->status);
3099		break;
3100
3101	case HCI_OP_READ_REMOTE_FEATURES:
3102		hci_cs_read_remote_features(hdev, ev->status);
3103		break;
3104
3105	case HCI_OP_READ_REMOTE_EXT_FEATURES:
3106		hci_cs_read_remote_ext_features(hdev, ev->status);
3107		break;
3108
3109	case HCI_OP_SETUP_SYNC_CONN:
3110		hci_cs_setup_sync_conn(hdev, ev->status);
3111		break;
3112
3113	case HCI_OP_SNIFF_MODE:
3114		hci_cs_sniff_mode(hdev, ev->status);
3115		break;
3116
3117	case HCI_OP_EXIT_SNIFF_MODE:
3118		hci_cs_exit_sniff_mode(hdev, ev->status);
3119		break;
3120
3121	case HCI_OP_SWITCH_ROLE:
3122		hci_cs_switch_role(hdev, ev->status);
3123		break;
3124
3125	case HCI_OP_LE_CREATE_CONN:
3126		hci_cs_le_create_conn(hdev, ev->status);
3127		break;
3128
3129	case HCI_OP_LE_READ_REMOTE_FEATURES:
3130		hci_cs_le_read_remote_features(hdev, ev->status);
3131		break;
3132
3133	case HCI_OP_LE_START_ENC:
3134		hci_cs_le_start_enc(hdev, ev->status);
3135		break;
3136
3137	default:
3138		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3139		break;
3140	}
3141
3142	if (*opcode != HCI_OP_NOP)
3143		cancel_delayed_work(&hdev->cmd_timer);
3144
3145	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3146		atomic_set(&hdev->cmd_cnt, 1);
3147
3148	/* Indicate request completion if the command failed. Also, if
3149	 * we're not waiting for a special event and we get a success
3150	 * command status, we should try to flag the request as completed
3151	 * (since for this kind of command there will not be a command
3152	 * complete event).
3153	 */
3154	if (ev->status ||
3155	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3156		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3157				     req_complete_skb);
3158
3159	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3160		queue_work(hdev->workqueue, &hdev->cmd_work);
3161}
3162
3163static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3164{
3165	struct hci_ev_hardware_error *ev = (void *) skb->data;
3166
3167	hdev->hw_error_code = ev->code;
3168
3169	queue_work(hdev->req_workqueue, &hdev->error_reset);
3170}
3171
3172static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3173{
3174	struct hci_ev_role_change *ev = (void *) skb->data;
3175	struct hci_conn *conn;
3176
3177	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3178
3179	hci_dev_lock(hdev);
3180
3181	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3182	if (conn) {
3183		if (!ev->status)
3184			conn->role = ev->role;
3185
3186		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3187
3188		hci_role_switch_cfm(conn, ev->status, ev->role);
3189	}
3190
3191	hci_dev_unlock(hdev);
3192}
3193
3194static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3195{
3196	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3197	int i;
3198
3199	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3200		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3201		return;
3202	}
3203
3204	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3205	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3206		BT_DBG("%s bad parameters", hdev->name);
3207		return;
3208	}
3209
3210	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3211
3212	for (i = 0; i < ev->num_hndl; i++) {
3213		struct hci_comp_pkts_info *info = &ev->handles[i];
3214		struct hci_conn *conn;
3215		__u16  handle, count;
3216
3217		handle = __le16_to_cpu(info->handle);
3218		count  = __le16_to_cpu(info->count);
3219
3220		conn = hci_conn_hash_lookup_handle(hdev, handle);
3221		if (!conn)
3222			continue;
3223
3224		conn->sent -= count;
3225
3226		switch (conn->type) {
3227		case ACL_LINK:
3228			hdev->acl_cnt += count;
3229			if (hdev->acl_cnt > hdev->acl_pkts)
3230				hdev->acl_cnt = hdev->acl_pkts;
3231			break;
3232
3233		case LE_LINK:
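			/* Controllers without a dedicated LE buffer
			 * pool (le_pkts == 0) share the BR/EDR ACL
			 * buffers, so return the credits to the ACL
			 * pool in that case.
			 */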
3234			if (hdev->le_pkts) {
3235				hdev->le_cnt += count;
3236				if (hdev->le_cnt > hdev->le_pkts)
3237					hdev->le_cnt = hdev->le_pkts;
3238			} else {
3239				hdev->acl_cnt += count;
3240				if (hdev->acl_cnt > hdev->acl_pkts)
3241					hdev->acl_cnt = hdev->acl_pkts;
3242			}
3243			break;
3244
3245		case SCO_LINK:
3246			hdev->sco_cnt += count;
3247			if (hdev->sco_cnt > hdev->sco_pkts)
3248				hdev->sco_cnt = hdev->sco_pkts;
3249			break;
3250
3251		default:
3252			bt_dev_err(hdev, "unknown type %d conn %p",
3253				   conn->type, conn);
3254			break;
3255		}
3256	}
3257
3258	queue_work(hdev->workqueue, &hdev->tx_work);
3259}
3260
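/* Resolve a handle to its connection. On AMP controllers the handle
 * identifies a logical channel rather than a connection, so it has to
 * be looked up through the channel table first.
 */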
3261static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3262						 __u16 handle)
3263{
3264	struct hci_chan *chan;
3265
3266	switch (hdev->dev_type) {
3267	case HCI_PRIMARY:
3268		return hci_conn_hash_lookup_handle(hdev, handle);
3269	case HCI_AMP:
3270		chan = hci_chan_lookup_handle(hdev, handle);
3271		if (chan)
3272			return chan->conn;
3273		break;
3274	default:
3275		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3276		break;
3277	}
3278
3279	return NULL;
3280}
3281
3282static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3283{
3284	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3285	int i;
3286
3287	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3288		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3289		return;
3290	}
3291
3292	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3293	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3294		BT_DBG("%s bad parameters", hdev->name);
3295		return;
3296	}
3297
3298	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3299	       ev->num_hndl);
3300
3301	for (i = 0; i < ev->num_hndl; i++) {
3302		struct hci_comp_blocks_info *info = &ev->handles[i];
3303		struct hci_conn *conn = NULL;
3304		__u16  handle, block_count;
3305
3306		handle = __le16_to_cpu(info->handle);
3307		block_count = __le16_to_cpu(info->blocks);
3308
3309		conn = __hci_conn_lookup_handle(hdev, handle);
3310		if (!conn)
3311			continue;
3312
3313		conn->sent -= block_count;
3314
3315		switch (conn->type) {
3316		case ACL_LINK:
3317		case AMP_LINK:
3318			hdev->block_cnt += block_count;
3319			if (hdev->block_cnt > hdev->num_blocks)
3320				hdev->block_cnt = hdev->num_blocks;
3321			break;
3322
3323		default:
3324			bt_dev_err(hdev, "unknown type %d conn %p",
3325				   conn->type, conn);
3326			break;
3327		}
3328	}
3329
3330	queue_work(hdev->workqueue, &hdev->tx_work);
3331}
3332
3333static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3334{
3335	struct hci_ev_mode_change *ev = (void *) skb->data;
3336	struct hci_conn *conn;
3337
3338	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3339
3340	hci_dev_lock(hdev);
3341
3342	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3343	if (conn) {
3344		conn->mode = ev->mode;
3345
3346		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3347					&conn->flags)) {
3348			if (conn->mode == HCI_CM_ACTIVE)
3349				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3350			else
3351				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3352		}
3353
3354		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3355			hci_sco_setup(conn, ev->status);
3356	}
3357
3358	hci_dev_unlock(hdev);
3359}
3360
3361static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3362{
3363	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3364	struct hci_conn *conn;
3365
3366	BT_DBG("%s", hdev->name);
3367
3368	hci_dev_lock(hdev);
3369
3370	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3371	if (!conn)
3372		goto unlock;
3373
3374	if (conn->state == BT_CONNECTED) {
3375		hci_conn_hold(conn);
3376		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3377		hci_conn_drop(conn);
3378	}
3379
3380	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3381	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3382		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3383			     sizeof(ev->bdaddr), &ev->bdaddr);
3384	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3385		u8 secure;
3386
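		/* A pending high security level can only be satisfied
		 * by a secure (16 digit) PIN code, so let user space
		 * know that one is required.
		 */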
3387		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3388			secure = 1;
3389		else
3390			secure = 0;
3391
3392		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3393	}
3394
3395unlock:
3396	hci_dev_unlock(hdev);
3397}
3398
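/* Record the type and PIN length of a link key on the connection and
 * derive the pending security level that this kind of key is able to
 * satisfy.
 */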
3399static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3400{
3401	if (key_type == HCI_LK_CHANGED_COMBINATION)
3402		return;
3403
3404	conn->pin_length = pin_len;
3405	conn->key_type = key_type;
3406
3407	switch (key_type) {
3408	case HCI_LK_LOCAL_UNIT:
3409	case HCI_LK_REMOTE_UNIT:
3410	case HCI_LK_DEBUG_COMBINATION:
3411		return;
3412	case HCI_LK_COMBINATION:
3413		if (pin_len == 16)
3414			conn->pending_sec_level = BT_SECURITY_HIGH;
3415		else
3416			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3417		break;
3418	case HCI_LK_UNAUTH_COMBINATION_P192:
3419	case HCI_LK_UNAUTH_COMBINATION_P256:
3420		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3421		break;
3422	case HCI_LK_AUTH_COMBINATION_P192:
3423		conn->pending_sec_level = BT_SECURITY_HIGH;
3424		break;
3425	case HCI_LK_AUTH_COMBINATION_P256:
3426		conn->pending_sec_level = BT_SECURITY_FIPS;
3427		break;
3428	}
3429}
3430
3431static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3432{
3433	struct hci_ev_link_key_req *ev = (void *) skb->data;
3434	struct hci_cp_link_key_reply cp;
3435	struct hci_conn *conn;
3436	struct link_key *key;
3437
3438	BT_DBG("%s", hdev->name);
3439
3440	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3441		return;
3442
3443	hci_dev_lock(hdev);
3444
3445	key = hci_find_link_key(hdev, &ev->bdaddr);
3446	if (!key) {
3447		BT_DBG("%s link key not found for %pMR", hdev->name,
3448		       &ev->bdaddr);
3449		goto not_found;
3450	}
3451
3452	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3453	       &ev->bdaddr);
3454
3455	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3456	if (conn) {
3457		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3458
3459		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3460		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3461		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3462			BT_DBG("%s ignoring unauthenticated key", hdev->name);
3463			goto not_found;
3464		}
3465
3466		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3467		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
3468		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
3469			BT_DBG("%s ignoring unauthenticated key for high security",
3470			       hdev->name);
3471			goto not_found;
3472		}
3473
3474		conn_set_key(conn, key->type, key->pin_len);
3475	}
3476
3477	bacpy(&cp.bdaddr, &ev->bdaddr);
3478	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3479
3480	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3481
3482	hci_dev_unlock(hdev);
3483
3484	return;
3485
3486not_found:
3487	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3488	hci_dev_unlock(hdev);
3489}
3490
3491static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3492{
3493	struct hci_ev_link_key_notify *ev = (void *) skb->data;
3494	struct hci_conn *conn;
3495	struct link_key *key;
3496	bool persistent;
3497	u8 pin_len = 0;
3498
3499	BT_DBG("%s", hdev->name);
3500
3501	hci_dev_lock(hdev);
3502
3503	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3504	if (!conn)
3505		goto unlock;
3506
3507	hci_conn_hold(conn);
3508	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3509	hci_conn_drop(conn);
3510
3511	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3512	conn_set_key(conn, ev->key_type, conn->pin_length);
3513
3514	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3515		goto unlock;
3516
3517	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3518			        ev->key_type, pin_len, &persistent);
3519	if (!key)
3520		goto unlock;
3521
3522	/* Update connection information since adding the key will have
3523	 * fixed up the type in the case of changed combination keys.
3524	 */
3525	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3526		conn_set_key(conn, key->type, key->pin_len);
3527
3528	mgmt_new_link_key(hdev, key, persistent);
3529
3530	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3531	 * is set. If it's not set, simply remove the key from the kernel
3532	 * list (we've still notified user space about it, but with
3533	 * store_hint being 0).
3534	 */
3535	if (key->type == HCI_LK_DEBUG_COMBINATION &&
3536	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3537		list_del_rcu(&key->list);
3538		kfree_rcu(key, rcu);
3539		goto unlock;
3540	}
3541
3542	if (persistent)
3543		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3544	else
3545		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3546
3547unlock:
3548	hci_dev_unlock(hdev);
3549}
3550
3551static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3552{
3553	struct hci_ev_clock_offset *ev = (void *) skb->data;
3554	struct hci_conn *conn;
3555
3556	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3557
3558	hci_dev_lock(hdev);
3559
3560	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3561	if (conn && !ev->status) {
3562		struct inquiry_entry *ie;
3563
3564		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3565		if (ie) {
3566			ie->data.clock_offset = ev->clock_offset;
3567			ie->timestamp = jiffies;
3568		}
3569	}
3570
3571	hci_dev_unlock(hdev);
3572}
3573
3574static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3575{
3576	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3577	struct hci_conn *conn;
3578
3579	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3580
3581	hci_dev_lock(hdev);
3582
3583	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3584	if (conn && !ev->status)
3585		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3586
3587	hci_dev_unlock(hdev);
3588}
3589
3590static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3591{
3592	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3593	struct inquiry_entry *ie;
3594
3595	BT_DBG("%s", hdev->name);
3596
3597	hci_dev_lock(hdev);
3598
3599	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3600	if (ie) {
3601		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3602		ie->timestamp = jiffies;
3603	}
3604
3605	hci_dev_unlock(hdev);
3606}
3607
3608static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3609					     struct sk_buff *skb)
3610{
3611	struct inquiry_data data;
3612	int num_rsp = *((__u8 *) skb->data);
3613
3614	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3615
3616	if (!num_rsp)
3617		return;
3618
3619	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3620		return;
3621
3622	hci_dev_lock(hdev);
3623
3624	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3625		struct inquiry_info_with_rssi_and_pscan_mode *info;
3626		info = (void *) (skb->data + 1);
3627
3628		for (; num_rsp; num_rsp--, info++) {
3629			u32 flags;
3630
3631			bacpy(&data.bdaddr, &info->bdaddr);
3632			data.pscan_rep_mode	= info->pscan_rep_mode;
3633			data.pscan_period_mode	= info->pscan_period_mode;
3634			data.pscan_mode		= info->pscan_mode;
3635			memcpy(data.dev_class, info->dev_class, 3);
3636			data.clock_offset	= info->clock_offset;
3637			data.rssi		= info->rssi;
3638			data.ssp_mode		= 0x00;
3639
3640			flags = hci_inquiry_cache_update(hdev, &data, false);
3641
3642			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3643					  info->dev_class, info->rssi,
3644					  flags, NULL, 0, NULL, 0);
3645		}
3646	} else {
3647		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3648
3649		for (; num_rsp; num_rsp--, info++) {
3650			u32 flags;
3651
3652			bacpy(&data.bdaddr, &info->bdaddr);
3653			data.pscan_rep_mode	= info->pscan_rep_mode;
3654			data.pscan_period_mode	= info->pscan_period_mode;
3655			data.pscan_mode		= 0x00;
3656			memcpy(data.dev_class, info->dev_class, 3);
3657			data.clock_offset	= info->clock_offset;
3658			data.rssi		= info->rssi;
3659			data.ssp_mode		= 0x00;
3660
3661			flags = hci_inquiry_cache_update(hdev, &data, false);
3662
3663			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3664					  info->dev_class, info->rssi,
3665					  flags, NULL, 0, NULL, 0);
3666		}
3667	}
3668
3669	hci_dev_unlock(hdev);
3670}
3671
3672static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3673					struct sk_buff *skb)
3674{
3675	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3676	struct hci_conn *conn;
3677
3678	BT_DBG("%s", hdev->name);
3679
3680	hci_dev_lock(hdev);
3681
3682	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3683	if (!conn)
3684		goto unlock;
3685
3686	if (ev->page < HCI_MAX_PAGES)
3687		memcpy(conn->features[ev->page], ev->features, 8);
3688
3689	if (!ev->status && ev->page == 0x01) {
3690		struct inquiry_entry *ie;
3691
3692		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3693		if (ie)
3694			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3695
3696		if (ev->features[0] & LMP_HOST_SSP) {
3697			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3698		} else {
3699			/* The Bluetooth specification mandates that
3700			 * Extended Inquiry Results are only used when
3701			 * Secure Simple Pairing is enabled, but some
3702			 * devices violate this.
3703			 *
3704			 * To make these devices work, the internal SSP
3705			 * enabled flag needs to be cleared if the remote
3706			 * host features do not indicate SSP support. */
3707			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3708		}
3709
3710		if (ev->features[0] & LMP_HOST_SC)
3711			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3712	}
3713
3714	if (conn->state != BT_CONFIG)
3715		goto unlock;
3716
3717	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3718		struct hci_cp_remote_name_req cp;
3719		memset(&cp, 0, sizeof(cp));
3720		bacpy(&cp.bdaddr, &conn->dst);
3721		cp.pscan_rep_mode = 0x02;
3722		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3723	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3724		mgmt_device_connected(hdev, conn, 0, NULL, 0);
3725
3726	if (!hci_outgoing_auth_needed(hdev, conn)) {
3727		conn->state = BT_CONNECTED;
3728		hci_connect_cfm(conn, ev->status);
3729		hci_conn_drop(conn);
3730	}
3731
3732unlock:
3733	hci_dev_unlock(hdev);
3734}
3735
3736static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3737				       struct sk_buff *skb)
3738{
3739	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3740	struct hci_conn *conn;
3741
3742	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3743
3744	hci_dev_lock(hdev);
3745
3746	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3747	if (!conn) {
3748		if (ev->link_type == ESCO_LINK)
3749			goto unlock;
3750
3751		/* When the link type in the event indicates SCO connection
3752		 * and lookup of the connection object fails, then check
3753		 * if an eSCO connection object exists.
3754		 *
3755		 * The core limits synchronous connections to either
3756		 * SCO or eSCO. The eSCO connection is preferred; it is
3757		 * tried first, and until it is successfully established
3758		 * the link type will be hinted as eSCO.
3759		 */
3760		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3761		if (!conn)
3762			goto unlock;
3763	}
3764
3765	switch (ev->status) {
3766	case 0x00:
3767		conn->handle = __le16_to_cpu(ev->handle);
3768		conn->state  = BT_CONNECTED;
3769		conn->type   = ev->link_type;
3770
3771		hci_debugfs_create_conn(conn);
3772		hci_conn_add_sysfs(conn);
3773		break;
3774
3775	case 0x10:	/* Connection Accept Timeout */
3776	case 0x0d:	/* Connection Rejected due to Limited Resources */
3777	case 0x11:	/* Unsupported Feature or Parameter Value */
3778	case 0x1c:	/* SCO interval rejected */
3779	case 0x1a:	/* Unsupported Remote Feature */
3780	case 0x1f:	/* Unspecified error */
3781	case 0x20:	/* Unsupported LMP Parameter value */
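		/* Note (illustrative): for these failure codes an outgoing
		 * connection attempt is retried below with an adjusted
		 * packet type mask before the connection is finally closed
		 * in the default case.
		 */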
3782		if (conn->out) {
3783			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3784					(hdev->esco_type & EDR_ESCO_MASK);
3785			if (hci_setup_sync(conn, conn->link->handle))
3786				goto unlock;
3787		}
3788		/* fall through */
3789
3790	default:
3791		conn->state = BT_CLOSED;
3792		break;
3793	}
3794
3795	hci_connect_cfm(conn, ev->status);
3796	if (ev->status)
3797		hci_conn_del(conn);
3798
3799unlock:
3800	hci_dev_unlock(hdev);
3801}
3802
3803static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3804{
3805	size_t parsed = 0;
3806
3807	while (parsed < eir_len) {
3808		u8 field_len = eir[0];
3809
3810		if (field_len == 0)
3811			return parsed;
3812
3813		parsed += field_len + 1;
3814		eir += field_len + 1;
3815	}
3816
3817	return eir_len;
3818}
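/* Illustrative example for the parser above: EIR data is a sequence of
 * [length][type][data...] fields, where the length byte covers the type
 * and data bytes. For the buffer
 * { 0x02, 0x01, 0x06, 0x05, 0x09, 'T', 'e', 's', 't', 0x00, ... }
 * eir_get_length() returns 9, stopping at the terminating zero length.
 */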
3819
3820static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3821					    struct sk_buff *skb)
3822{
3823	struct inquiry_data data;
3824	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3825	int num_rsp = *((__u8 *) skb->data);
3826	size_t eir_len;
3827
3828	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3829
3830	if (!num_rsp)
3831		return;
3832
3833	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3834		return;
3835
3836	hci_dev_lock(hdev);
3837
3838	for (; num_rsp; num_rsp--, info++) {
3839		u32 flags;
3840		bool name_known;
3841
3842		bacpy(&data.bdaddr, &info->bdaddr);
3843		data.pscan_rep_mode	= info->pscan_rep_mode;
3844		data.pscan_period_mode	= info->pscan_period_mode;
3845		data.pscan_mode		= 0x00;
3846		memcpy(data.dev_class, info->dev_class, 3);
3847		data.clock_offset	= info->clock_offset;
3848		data.rssi		= info->rssi;
3849		data.ssp_mode		= 0x01;
3850
3851		if (hci_dev_test_flag(hdev, HCI_MGMT))
3852			name_known = eir_get_data(info->data,
3853						  sizeof(info->data),
3854						  EIR_NAME_COMPLETE, NULL);
3855		else
3856			name_known = true;
3857
3858		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3859
3860		eir_len = eir_get_length(info->data, sizeof(info->data));
3861
3862		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3863				  info->dev_class, info->rssi,
3864				  flags, info->data, eir_len, NULL, 0);
3865	}
3866
3867	hci_dev_unlock(hdev);
3868}
3869
3870static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3871					 struct sk_buff *skb)
3872{
3873	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3874	struct hci_conn *conn;
3875
3876	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3877	       __le16_to_cpu(ev->handle));
3878
3879	hci_dev_lock(hdev);
3880
3881	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3882	if (!conn)
3883		goto unlock;
3884
3885	/* For BR/EDR the necessary steps are taken through the
3886	 * auth_complete event.
3887	 */
3888	if (conn->type != LE_LINK)
3889		goto unlock;
3890
3891	if (!ev->status)
3892		conn->sec_level = conn->pending_sec_level;
3893
3894	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3895
3896	if (ev->status && conn->state == BT_CONNECTED) {
3897		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3898		hci_conn_drop(conn);
3899		goto unlock;
3900	}
3901
3902	if (conn->state == BT_CONFIG) {
3903		if (!ev->status)
3904			conn->state = BT_CONNECTED;
3905
3906		hci_connect_cfm(conn, ev->status);
3907		hci_conn_drop(conn);
3908	} else {
3909		hci_auth_cfm(conn, ev->status);
3910
3911		hci_conn_hold(conn);
3912		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3913		hci_conn_drop(conn);
3914	}
3915
3916unlock:
3917	hci_dev_unlock(hdev);
3918}
3919
3920static u8 hci_get_auth_req(struct hci_conn *conn)
3921{
3922	/* If remote requests no-bonding follow that lead */
3923	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3924	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3925		return conn->remote_auth | (conn->auth_type & 0x01);
3926
3927	/* If both remote and local have enough IO capabilities, require
3928	 * MITM protection
3929	 */
3930	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3931	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3932		return conn->remote_auth | 0x01;
3933
3934	/* No MITM protection possible so ignore remote requirement */
3935	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3936}
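/* Illustrative example: the low bit of an authentication requirements
 * value encodes the MITM flag (e.g. HCI_AT_NO_BONDING and
 * HCI_AT_NO_BONDING_MITM differ only in bit 0). So with a remote_auth of
 * general bonding (0x04) and both sides having usable IO capabilities,
 * hci_get_auth_req() above would return 0x05 (general bonding with MITM
 * protection).
 */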
3937
3938static u8 bredr_oob_data_present(struct hci_conn *conn)
3939{
3940	struct hci_dev *hdev = conn->hdev;
3941	struct oob_data *data;
3942
3943	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3944	if (!data)
3945		return 0x00;
3946
3947	if (bredr_sc_enabled(hdev)) {
3948		/* When Secure Connections is enabled, then just
3949		 * return the present value stored with the OOB
3950		 * data. The stored value contains the right present
3951		 * information. However it can only be trusted when
3952		 * not in Secure Connection Only mode.
3953		 */
3954		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3955			return data->present;
3956
3957		/* When Secure Connections Only mode is enabled, then
3958		 * the P-256 values are required. If they are not
3959		 * available, then do not declare that OOB data is
3960		 * present.
3961		 */
3962		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3963		    !memcmp(data->hash256, ZERO_KEY, 16))
3964			return 0x00;
3965
3966		return 0x02;
3967	}
3968
3969	/* When Secure Connections is not enabled or actually
3970	 * not supported by the hardware, then check whether the
3971	 * P-192 data values are present.
3972	 */
3973	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3974	    !memcmp(data->hash192, ZERO_KEY, 16))
3975		return 0x00;
3976
3977	return 0x01;
3978}
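/* Note (illustrative): the value returned above feeds the OOB data
 * present field of the IO Capability Reply, where (per the Core
 * specification) 0x00 means no OOB data is available, 0x01 means P-192
 * data is available and 0x02 means P-256 data is available.
 */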
3979
3980static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3981{
3982	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3983	struct hci_conn *conn;
3984
3985	BT_DBG("%s", hdev->name);
3986
3987	hci_dev_lock(hdev);
3988
3989	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3990	if (!conn)
3991		goto unlock;
3992
3993	hci_conn_hold(conn);
3994
3995	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3996		goto unlock;
3997
3998	/* Allow pairing if we're pairable, if we are the initiators
3999	 * of the pairing, or if the remote is not requesting bonding.
4000	 */
4001	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4002	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4003	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4004		struct hci_cp_io_capability_reply cp;
4005
4006		bacpy(&cp.bdaddr, &ev->bdaddr);
4007		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
4008		 * as KeyboardDisplay is not supported by the BT spec. */
4009		cp.capability = (conn->io_capability == 0x04) ?
4010				HCI_IO_DISPLAY_YESNO : conn->io_capability;
4011
4012		/* If we are initiators, there is no remote information yet */
4013		if (conn->remote_auth == 0xff) {
4014			/* Request MITM protection if our IO caps allow it
4015			 * except for the no-bonding case.
4016			 */
4017			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4018			    conn->auth_type != HCI_AT_NO_BONDING)
4019				conn->auth_type |= 0x01;
4020		} else {
4021			conn->auth_type = hci_get_auth_req(conn);
4022		}
4023
4024		/* If we're not bondable, force one of the non-bondable
4025		 * authentication requirement values.
4026		 */
4027		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4028			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4029
4030		cp.authentication = conn->auth_type;
4031		cp.oob_data = bredr_oob_data_present(conn);
4032
4033		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4034			     sizeof(cp), &cp);
4035	} else {
4036		struct hci_cp_io_capability_neg_reply cp;
4037
4038		bacpy(&cp.bdaddr, &ev->bdaddr);
4039		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4040
4041		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4042			     sizeof(cp), &cp);
4043	}
4044
4045unlock:
4046	hci_dev_unlock(hdev);
4047}
4048
4049static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4050{
4051	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4052	struct hci_conn *conn;
4053
4054	BT_DBG("%s", hdev->name);
4055
4056	hci_dev_lock(hdev);
4057
4058	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4059	if (!conn)
4060		goto unlock;
4061
4062	conn->remote_cap = ev->capability;
4063	conn->remote_auth = ev->authentication;
4064
4065unlock:
4066	hci_dev_unlock(hdev);
4067}
4068
4069static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4070					 struct sk_buff *skb)
4071{
4072	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4073	int loc_mitm, rem_mitm, confirm_hint = 0;
4074	struct hci_conn *conn;
4075
4076	BT_DBG("%s", hdev->name);
4077
4078	hci_dev_lock(hdev);
4079
4080	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4081		goto unlock;
4082
4083	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4084	if (!conn)
4085		goto unlock;
4086
4087	loc_mitm = (conn->auth_type & 0x01);
4088	rem_mitm = (conn->remote_auth & 0x01);
4089
4090	/* If we require MITM but the remote device can't provide that
4091	 * (it has NoInputNoOutput) then reject the confirmation
4092	 * request. We check the security level here since it doesn't
4093	 * necessarily match conn->auth_type.
4094	 */
4095	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4096	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4097		BT_DBG("Rejecting request: remote device can't provide MITM");
4098		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4099			     sizeof(ev->bdaddr), &ev->bdaddr);
4100		goto unlock;
4101	}
4102
4103	/* If no side requires MITM protection, auto-accept */
4104	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4105	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4106
4107		/* If we're not the initiators, request authorization to
4108		 * proceed from user space (mgmt_user_confirm with
4109		 * confirm_hint set to 1). The exception is if neither
4110		 * side had MITM or if the local IO capability is
4111		 * NoInputNoOutput, in which case we do auto-accept
4112		 */
4113		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4114		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4115		    (loc_mitm || rem_mitm)) {
4116			BT_DBG("Confirming auto-accept as acceptor");
4117			confirm_hint = 1;
4118			goto confirm;
4119		}
4120
4121		BT_DBG("Auto-accept of user confirmation with %ums delay",
4122		       hdev->auto_accept_delay);
4123
4124		if (hdev->auto_accept_delay > 0) {
4125			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4126			queue_delayed_work(conn->hdev->workqueue,
4127					   &conn->auto_accept_work, delay);
4128			goto unlock;
4129		}
4130
4131		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4132			     sizeof(ev->bdaddr), &ev->bdaddr);
4133		goto unlock;
4134	}
4135
4136confirm:
4137	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4138				  le32_to_cpu(ev->passkey), confirm_hint);
4139
4140unlock:
4141	hci_dev_unlock(hdev);
4142}
4143
4144static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4145					 struct sk_buff *skb)
4146{
4147	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4148
4149	BT_DBG("%s", hdev->name);
4150
4151	if (hci_dev_test_flag(hdev, HCI_MGMT))
4152		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4153}
4154
4155static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4156					struct sk_buff *skb)
4157{
4158	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4159	struct hci_conn *conn;
4160
4161	BT_DBG("%s", hdev->name);
4162
4163	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4164	if (!conn)
4165		return;
4166
4167	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4168	conn->passkey_entered = 0;
4169
4170	if (hci_dev_test_flag(hdev, HCI_MGMT))
4171		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4172					 conn->dst_type, conn->passkey_notify,
4173					 conn->passkey_entered);
4174}
4175
4176static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4177{
4178	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4179	struct hci_conn *conn;
4180
4181	BT_DBG("%s", hdev->name);
4182
4183	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4184	if (!conn)
4185		return;
4186
4187	switch (ev->type) {
4188	case HCI_KEYPRESS_STARTED:
4189		conn->passkey_entered = 0;
4190		return;
4191
4192	case HCI_KEYPRESS_ENTERED:
4193		conn->passkey_entered++;
4194		break;
4195
4196	case HCI_KEYPRESS_ERASED:
4197		conn->passkey_entered--;
4198		break;
4199
4200	case HCI_KEYPRESS_CLEARED:
4201		conn->passkey_entered = 0;
4202		break;
4203
4204	case HCI_KEYPRESS_COMPLETED:
4205		return;
4206	}
4207
4208	if (hci_dev_test_flag(hdev, HCI_MGMT))
4209		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4210					 conn->dst_type, conn->passkey_notify,
4211					 conn->passkey_entered);
4212}
4213
4214static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4215					 struct sk_buff *skb)
4216{
4217	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4218	struct hci_conn *conn;
4219
4220	BT_DBG("%s", hdev->name);
4221
4222	hci_dev_lock(hdev);
4223
4224	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4225	if (!conn)
4226		goto unlock;
4227
4228	/* Reset the authentication requirement to unknown */
4229	conn->remote_auth = 0xff;
4230
4231	/* To avoid duplicate auth_failed events to user space we check
4232	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4233	 * initiated the authentication. A traditional auth_complete
4234	 * event is always produced as the initiator and is also mapped to
4235	 * the mgmt_auth_failed event */
4236	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4237		mgmt_auth_failed(conn, ev->status);
4238
4239	hci_conn_drop(conn);
4240
4241unlock:
4242	hci_dev_unlock(hdev);
4243}
4244
4245static void hci_remote_host_features_evt(struct hci_dev *hdev,
4246					 struct sk_buff *skb)
4247{
4248	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4249	struct inquiry_entry *ie;
4250	struct hci_conn *conn;
4251
4252	BT_DBG("%s", hdev->name);
4253
4254	hci_dev_lock(hdev);
4255
4256	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4257	if (conn)
4258		memcpy(conn->features[1], ev->features, 8);
4259
4260	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4261	if (ie)
4262		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4263
4264	hci_dev_unlock(hdev);
4265}
4266
4267static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4268					    struct sk_buff *skb)
4269{
4270	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4271	struct oob_data *data;
4272
4273	BT_DBG("%s", hdev->name);
4274
4275	hci_dev_lock(hdev);
4276
4277	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4278		goto unlock;
4279
4280	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4281	if (!data) {
4282		struct hci_cp_remote_oob_data_neg_reply cp;
4283
4284		bacpy(&cp.bdaddr, &ev->bdaddr);
4285		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4286			     sizeof(cp), &cp);
4287		goto unlock;
4288	}
4289
4290	if (bredr_sc_enabled(hdev)) {
4291		struct hci_cp_remote_oob_ext_data_reply cp;
4292
4293		bacpy(&cp.bdaddr, &ev->bdaddr);
4294		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4295			memset(cp.hash192, 0, sizeof(cp.hash192));
4296			memset(cp.rand192, 0, sizeof(cp.rand192));
4297		} else {
4298			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4299			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4300		}
4301		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4302		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4303
4304		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4305			     sizeof(cp), &cp);
4306	} else {
4307		struct hci_cp_remote_oob_data_reply cp;
4308
4309		bacpy(&cp.bdaddr, &ev->bdaddr);
4310		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4311		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4312
4313		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4314			     sizeof(cp), &cp);
4315	}
4316
4317unlock:
4318	hci_dev_unlock(hdev);
4319}
4320
4321#if IS_ENABLED(CONFIG_BT_HS)
4322static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4323{
4324	struct hci_ev_channel_selected *ev = (void *)skb->data;
4325	struct hci_conn *hcon;
4326
4327	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4328
4329	skb_pull(skb, sizeof(*ev));
4330
4331	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4332	if (!hcon)
4333		return;
4334
4335	amp_read_loc_assoc_final_data(hdev, hcon);
4336}
4337
4338static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4339				      struct sk_buff *skb)
4340{
4341	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4342	struct hci_conn *hcon, *bredr_hcon;
4343
4344	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4345	       ev->status);
4346
4347	hci_dev_lock(hdev);
4348
4349	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4350	if (!hcon) {
4351		hci_dev_unlock(hdev);
4352		return;
4353	}
4354
4355	if (ev->status) {
4356		hci_conn_del(hcon);
4357		hci_dev_unlock(hdev);
4358		return;
4359	}
4360
4361	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4362
4363	hcon->state = BT_CONNECTED;
4364	bacpy(&hcon->dst, &bredr_hcon->dst);
4365
4366	hci_conn_hold(hcon);
4367	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4368	hci_conn_drop(hcon);
4369
4370	hci_debugfs_create_conn(hcon);
4371	hci_conn_add_sysfs(hcon);
4372
4373	amp_physical_cfm(bredr_hcon, hcon);
4374
4375	hci_dev_unlock(hdev);
4376}
4377
4378static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4379{
4380	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4381	struct hci_conn *hcon;
4382	struct hci_chan *hchan;
4383	struct amp_mgr *mgr;
4384
4385	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4386	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4387	       ev->status);
4388
4389	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4390	if (!hcon)
4391		return;
4392
4393	/* Create AMP hchan */
4394	hchan = hci_chan_create(hcon);
4395	if (!hchan)
4396		return;
4397
4398	hchan->handle = le16_to_cpu(ev->handle);
4399
4400	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4401
4402	mgr = hcon->amp_mgr;
4403	if (mgr && mgr->bredr_chan) {
4404		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4405
4406		l2cap_chan_lock(bredr_chan);
4407
4408		bredr_chan->conn->mtu = hdev->block_mtu;
4409		l2cap_logical_cfm(bredr_chan, hchan, 0);
4410		hci_conn_hold(hcon);
4411
4412		l2cap_chan_unlock(bredr_chan);
4413	}
4414}
4415
4416static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4417					     struct sk_buff *skb)
4418{
4419	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4420	struct hci_chan *hchan;
4421
4422	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4423	       le16_to_cpu(ev->handle), ev->status);
4424
4425	if (ev->status)
4426		return;
4427
4428	hci_dev_lock(hdev);
4429
4430	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4431	if (!hchan)
4432		goto unlock;
4433
4434	amp_destroy_logical_link(hchan, ev->reason);
4435
4436unlock:
4437	hci_dev_unlock(hdev);
4438}
4439
4440static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4441					     struct sk_buff *skb)
4442{
4443	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4444	struct hci_conn *hcon;
4445
4446	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4447
4448	if (ev->status)
4449		return;
4450
4451	hci_dev_lock(hdev);
4452
4453	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4454	if (hcon) {
4455		hcon->state = BT_CLOSED;
4456		hci_conn_del(hcon);
4457	}
4458
4459	hci_dev_unlock(hdev);
4460}
4461#endif
4462
4463static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4464{
4465	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4466	struct hci_conn_params *params;
4467	struct hci_conn *conn;
4468	struct smp_irk *irk;
4469	u8 addr_type;
4470
4471	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4472
4473	hci_dev_lock(hdev);
4474
4475	/* All controllers implicitly stop advertising in the event of a
4476	 * connection, so ensure that the state bit is cleared.
4477	 */
4478	hci_dev_clear_flag(hdev, HCI_LE_ADV);
4479
4480	conn = hci_lookup_le_connect(hdev);
4481	if (!conn) {
4482		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4483		if (!conn) {
4484			bt_dev_err(hdev, "no memory for new connection");
4485			goto unlock;
4486		}
4487
4488		conn->dst_type = ev->bdaddr_type;
4489
4490		/* If we didn't have a hci_conn object previously
4491		 * but we're in the master role, this must be something
4492		 * initiated using a white list. Since white list based
4493		 * connections are not "first class citizens" we don't
4494		 * have full tracking of them. Therefore, we go ahead
4495		 * with a "best effort" approach of determining the
4496		 * initiator address based on the HCI_PRIVACY flag.
4497		 */
4498		if (conn->out) {
4499			conn->resp_addr_type = ev->bdaddr_type;
4500			bacpy(&conn->resp_addr, &ev->bdaddr);
4501			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4502				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4503				bacpy(&conn->init_addr, &hdev->rpa);
4504			} else {
4505				hci_copy_identity_address(hdev,
4506							  &conn->init_addr,
4507							  &conn->init_addr_type);
4508			}
4509		}
4510	} else {
4511		cancel_delayed_work(&conn->le_conn_timeout);
4512	}
4513
4514	if (!conn->out) {
4515		/* Set the responder (our side) address type based on
4516		 * the advertising address type.
4517		 */
4518		conn->resp_addr_type = hdev->adv_addr_type;
4519		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4520			bacpy(&conn->resp_addr, &hdev->random_addr);
4521		else
4522			bacpy(&conn->resp_addr, &hdev->bdaddr);
4523
4524		conn->init_addr_type = ev->bdaddr_type;
4525		bacpy(&conn->init_addr, &ev->bdaddr);
4526
4527		/* For incoming connections, set the default minimum
4528		 * and maximum connection interval. They will be used
4529		 * to check if the parameters are in range and if not
4530		 * trigger the connection update procedure.
4531		 */
4532		conn->le_conn_min_interval = hdev->le_conn_min_interval;
4533		conn->le_conn_max_interval = hdev->le_conn_max_interval;
4534	}
4535
4536	/* Lookup the identity address from the stored connection
4537	 * address and address type.
4538	 *
4539	 * When establishing connections to an identity address, the
4540	 * connection procedure will store the resolvable random
4541	 * address first. Now if it can be converted back into the
4542	 * identity address, start using the identity address from
4543	 * now on.
4544	 */
4545	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4546	if (irk) {
4547		bacpy(&conn->dst, &irk->bdaddr);
4548		conn->dst_type = irk->addr_type;
4549	}
4550
4551	if (ev->status) {
4552		hci_le_conn_failed(conn, ev->status);
4553		goto unlock;
4554	}
4555
4556	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4557		addr_type = BDADDR_LE_PUBLIC;
4558	else
4559		addr_type = BDADDR_LE_RANDOM;
4560
4561	/* Drop the connection if the device is blocked */
4562	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4563		hci_conn_drop(conn);
4564		goto unlock;
4565	}
4566
4567	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4568		mgmt_device_connected(hdev, conn, 0, NULL, 0);
4569
4570	conn->sec_level = BT_SECURITY_LOW;
4571	conn->handle = __le16_to_cpu(ev->handle);
4572	conn->state = BT_CONFIG;
4573
4574	conn->le_conn_interval = le16_to_cpu(ev->interval);
4575	conn->le_conn_latency = le16_to_cpu(ev->latency);
4576	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
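	/* Note (illustrative, per the Core specification): the connection
	 * interval reported by the controller is in units of 1.25 ms and
	 * the supervision timeout is in units of 10 ms.
	 */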
4577
4578	hci_debugfs_create_conn(conn);
4579	hci_conn_add_sysfs(conn);
4580
4581	if (!ev->status) {
4582		/* The remote features procedure is defined for master
4583		 * role only. So only in case of an initiated connection
4584		 * request the remote features.
4585		 *
4586		 * If the local controller supports slave-initiated features
4587		 * exchange, then requesting the remote features in slave
4588		 * role is possible. Otherwise just transition into the
4589		 * connected state without requesting the remote features.
4590		 */
4591		if (conn->out ||
4592		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4593			struct hci_cp_le_read_remote_features cp;
4594
4595			cp.handle = __cpu_to_le16(conn->handle);
4596
4597			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4598				     sizeof(cp), &cp);
4599
4600			hci_conn_hold(conn);
4601		} else {
4602			conn->state = BT_CONNECTED;
4603			hci_connect_cfm(conn, ev->status);
4604		}
4605	} else {
4606		hci_connect_cfm(conn, ev->status);
4607	}
4608
4609	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4610					   conn->dst_type);
4611	if (params) {
4612		list_del_init(&params->action);
4613		if (params->conn) {
4614			hci_conn_drop(params->conn);
4615			hci_conn_put(params->conn);
4616			params->conn = NULL;
4617		}
4618	}
4619
4620unlock:
4621	hci_update_background_scan(hdev);
4622	hci_dev_unlock(hdev);
4623}
4624
4625static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4626					    struct sk_buff *skb)
4627{
4628	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4629	struct hci_conn *conn;
4630
4631	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4632
4633	if (ev->status)
4634		return;
4635
4636	hci_dev_lock(hdev);
4637
4638	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4639	if (conn) {
4640		conn->le_conn_interval = le16_to_cpu(ev->interval);
4641		conn->le_conn_latency = le16_to_cpu(ev->latency);
4642		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4643	}
4644
4645	hci_dev_unlock(hdev);
4646}
4647
4648/* This function requires the caller holds hdev->lock */
4649static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4650					      bdaddr_t *addr,
4651					      u8 addr_type, u8 adv_type,
4652					      bdaddr_t *direct_rpa)
4653{
4654	struct hci_conn *conn;
4655	struct hci_conn_params *params;
4656
4657	/* If the event is not connectable don't proceed further */
4658	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4659		return NULL;
4660
4661	/* Ignore if the device is blocked */
4662	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4663		return NULL;
4664
4665	/* Most controllers will fail if we try to create new connections
4666	 * while we have an existing one in slave role.
4667	 */
4668	if (hdev->conn_hash.le_num_slave > 0)
4669		return NULL;
4670
4671	/* If we're not connectable only connect devices that we have in
4672	 * our pend_le_conns list.
4673	 */
4674	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4675					   addr_type);
4676	if (!params)
4677		return NULL;
4678
4679	if (!params->explicit_connect) {
4680		switch (params->auto_connect) {
4681		case HCI_AUTO_CONN_DIRECT:
4682			/* Only devices advertising with ADV_DIRECT_IND
4683			 * trigger a connection attempt. This allows
4684			 * incoming connections from slave devices.
4685			 */
4686			if (adv_type != LE_ADV_DIRECT_IND)
4687				return NULL;
4688			break;
4689		case HCI_AUTO_CONN_ALWAYS:
4690			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
4691			 * trigger a connection attempt. This means that
4692			 * incoming connections from slave devices are
4693			 * accepted and also outgoing connections to slave
4694			 * devices are established when found.
4695			 */
4696			break;
4697		default:
4698			return NULL;
4699		}
4700	}
4701
4702	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4703			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
4704			      direct_rpa);
4705	if (!IS_ERR(conn)) {
4706		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4707		 * by the higher layer that tried to connect; if not, then
4708		 * store the pointer since we don't really have any
4709		 * other owner of the object besides the params that
4710		 * triggered it. This way we can abort the connection if
4711		 * the parameters get removed and keep the reference
4712		 * count consistent once the connection is established.
4713		 */
4714
4715		if (!params->explicit_connect)
4716			params->conn = hci_conn_get(conn);
4717
4718		return conn;
4719	}
4720
4721	switch (PTR_ERR(conn)) {
4722	case -EBUSY:
4723		/* If hci_connect() returns -EBUSY it means there is already
4724		 * an LE connection attempt going on. Since controllers don't
4725		 * support more than one connection attempt at a time, we
4726		 * don't consider this an error case.
4727		 */
4728		break;
4729	default:
4730		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4731		return NULL;
4732	}
4733
4734	return NULL;
4735}
4736
4737static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4738			       u8 bdaddr_type, bdaddr_t *direct_addr,
4739			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4740{
4741	struct discovery_state *d = &hdev->discovery;
4742	struct smp_irk *irk;
4743	struct hci_conn *conn;
4744	bool match;
4745	u32 flags;
4746	u8 *ptr, real_len;
4747
4748	switch (type) {
4749	case LE_ADV_IND:
4750	case LE_ADV_DIRECT_IND:
4751	case LE_ADV_SCAN_IND:
4752	case LE_ADV_NONCONN_IND:
4753	case LE_ADV_SCAN_RSP:
4754		break;
4755	default:
4756		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
4757				       "type: 0x%02x", type);
4758		return;
4759	}
4760
4761	/* Find the end of the data in case the report contains padded zero
4762	 * bytes at the end causing an invalid length value.
4763	 *
4764	 * When data is NULL, len is 0 so there is no need for extra ptr
4765	 * check as 'ptr < data + 0' is already false in such case.
4766	 */
4767	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4768		if (ptr + 1 + *ptr > data + len)
4769			break;
4770	}
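	/* Illustrative example: for data = { 0x02, 0x01, 0x06, 0x00, 0x00 }
	 * with len = 5, the loop above stops at the first zero length byte,
	 * so real_len below becomes 3.
	 */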
4771
4772	real_len = ptr - data;
4773
4774	/* Adjust for actual length */
4775	if (len != real_len) {
4776		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
4777		len = real_len;
4778	}
4779
4780	/* If the direct address is present, then this report is from
4781	 * a LE Direct Advertising Report event. In that case it is
4782	 * important to see if the address is matching the local
4783	 * controller address.
4784	 */
4785	if (direct_addr) {
4786		/* Only resolvable random addresses are valid for these
4787		 * kinds of reports; others can be ignored.
4788		 */
4789		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4790			return;
4791
4792		/* If the controller is not using resolvable random
4793		 * addresses, then this report can be ignored.
4794		 */
4795		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4796			return;
4797
4798		/* If the local IRK of the controller does not match
4799		 * with the resolvable random address provided, then
4800		 * this report can be ignored.
4801		 */
4802		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4803			return;
4804	}
4805
4806	/* Check if we need to convert to identity address */
4807	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4808	if (irk) {
4809		bdaddr = &irk->bdaddr;
4810		bdaddr_type = irk->addr_type;
4811	}
4812
4813	/* Check if we have been requested to connect to this device.
4814	 *
4815	 * direct_addr is set only for directed advertising reports (it is NULL
4816	 * for advertising reports) and is already verified to be RPA above.
4817	 */
4818	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
4819								direct_addr);
4820	if (conn && type == LE_ADV_IND) {
4821		/* Store report for later inclusion by
4822		 * mgmt_device_connected
4823		 */
4824		memcpy(conn->le_adv_data, data, len);
4825		conn->le_adv_data_len = len;
4826	}
4827
4828	/* Passive scanning shouldn't trigger any device found events,
4829	 * except for devices marked as CONN_REPORT for which we do send
4830	 * device found events.
4831	 */
4832	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4833		if (type == LE_ADV_DIRECT_IND)
4834			return;
4835
4836		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4837					       bdaddr, bdaddr_type))
4838			return;
4839
4840		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4841			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4842		else
4843			flags = 0;
4844		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4845				  rssi, flags, data, len, NULL, 0);
4846		return;
4847	}
4848
4849	/* When receiving non-connectable or scannable undirected
4850	 * advertising reports, the remote device is clearly
4851	 * not connectable, and this is indicated in the
4852	 * device found event.
4853	 *
4854	 * When receiving a scan response, then there is no way to
4855	 * know if the remote device is connectable or not. However
4856	 * since scan responses are merged with a previously seen
4857	 * advertising report, the flags field from that report
4858	 * will be used.
4859	 *
4860	 * In the really unlikely case that a controller gets confused
4861	 * and just sends a scan response event, then it is marked as
4862	 * not connectable as well.
4863	 */
4864	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4865	    type == LE_ADV_SCAN_RSP)
4866		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4867	else
4868		flags = 0;
4869
4870	/* If there's nothing pending either store the data from this
4871	 * event or send an immediate device found event if the data
4872	 * should not be stored for later.
4873	 */
4874	if (!has_pending_adv_report(hdev)) {
4875		/* If the report will trigger a SCAN_REQ store it for
4876		 * later merging.
4877		 */
4878		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4879			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4880						 rssi, flags, data, len);
4881			return;
4882		}
4883
4884		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4885				  rssi, flags, data, len, NULL, 0);
4886		return;
4887	}
4888
4889	/* Check if the pending report is for the same device as the new one */
4890	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4891		 bdaddr_type == d->last_adv_addr_type);
4892
4893	/* If the pending data doesn't match this report or this isn't a
4894	 * scan response (e.g. we got a duplicate ADV_IND) then force
4895	 * sending of the pending data.
4896	 */
4897	if (type != LE_ADV_SCAN_RSP || !match) {
4898		/* Send out whatever is in the cache, but skip duplicates */
4899		if (!match)
4900			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4901					  d->last_adv_addr_type, NULL,
4902					  d->last_adv_rssi, d->last_adv_flags,
4903					  d->last_adv_data,
4904					  d->last_adv_data_len, NULL, 0);
4905
4906		/* If the new report will trigger a SCAN_REQ store it for
4907		 * later merging.
4908		 */
4909		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4910			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4911						 rssi, flags, data, len);
4912			return;
4913		}
4914
4915		/* The advertising reports cannot be merged, so clear
4916		 * the pending report and send out a device found event.
4917		 */
4918		clear_pending_adv_report(hdev);
4919		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4920				  rssi, flags, data, len, NULL, 0);
4921		return;
4922	}
4923
4924	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4925	 * the new event is a SCAN_RSP. We can therefore proceed with
4926	 * sending a merged device found event.
4927	 */
4928	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4929			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4930			  d->last_adv_data, d->last_adv_data_len, data, len);
4931	clear_pending_adv_report(hdev);
4932}
4933
4934static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4935{
4936	u8 num_reports = skb->data[0];
4937	void *ptr = &skb->data[1];
4938
4939	hci_dev_lock(hdev);
4940
4941	while (num_reports--) {
4942		struct hci_ev_le_advertising_info *ev = ptr;
4943		s8 rssi;
4944
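		/* Note (illustrative): each advertising report carries
		 * evt_type, bdaddr_type, bdaddr, a length byte and 'length'
		 * bytes of data, followed by a single trailing RSSI byte;
		 * hence ev->data[ev->length] below and the '+ 1' when
		 * advancing ptr.
		 */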
4945		rssi = ev->data[ev->length];
4946		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4947				   ev->bdaddr_type, NULL, 0, rssi,
4948				   ev->data, ev->length);
4949
4950		ptr += sizeof(*ev) + ev->length + 1;
4951	}
4952
4953	hci_dev_unlock(hdev);
4954}
4955
4956static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4957					    struct sk_buff *skb)
4958{
4959	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4960	struct hci_conn *conn;
4961
4962	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4963
4964	hci_dev_lock(hdev);
4965
4966	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4967	if (conn) {
4968		if (!ev->status)
4969			memcpy(conn->features[0], ev->features, 8);
4970
4971		if (conn->state == BT_CONFIG) {
4972			__u8 status;
4973
4974			/* If the local controller supports slave-initiated
4975			 * features exchange, but the remote controller does
4976			 * not, then it is possible that the error code 0x1a
4977			 * for unsupported remote feature gets returned.
4978			 *
4979			 * In this specific case, allow the connection to
4980			 * transition into connected state and mark it as
4981			 * successful.
4982			 */
4983			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4984			    !conn->out && ev->status == 0x1a)
4985				status = 0x00;
4986			else
4987				status = ev->status;
4988
4989			conn->state = BT_CONNECTED;
4990			hci_connect_cfm(conn, status);
4991			hci_conn_drop(conn);
4992		}
4993	}
4994
4995	hci_dev_unlock(hdev);
4996}
4997
4998static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4999{
5000	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5001	struct hci_cp_le_ltk_reply cp;
5002	struct hci_cp_le_ltk_neg_reply neg;
5003	struct hci_conn *conn;
5004	struct smp_ltk *ltk;
5005
5006	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5007
5008	hci_dev_lock(hdev);
5009
5010	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5011	if (!conn)
5012		goto not_found;
5013
5014	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5015	if (!ltk)
5016		goto not_found;
5017
5018	if (smp_ltk_is_sc(ltk)) {
5019		/* With SC both EDiv and Rand are set to zero */
5020		if (ev->ediv || ev->rand)
5021			goto not_found;
5022	} else {
5023		/* For non-SC keys check that EDiv and Rand match */
5024		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5025			goto not_found;
5026	}
5027
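	/* Note (illustrative): the stored key may be shorter than 16 bytes
	 * (enc_size), so the reply buffer is filled with the key value and
	 * the remainder is zero padded before being sent to the controller.
	 */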
5028	memcpy(cp.ltk, ltk->val, ltk->enc_size);
5029	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5030	cp.handle = cpu_to_le16(conn->handle);
5031
5032	conn->pending_sec_level = smp_ltk_sec_level(ltk);
5033
5034	conn->enc_key_size = ltk->enc_size;
5035
5036	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5037
5038	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5039	 * temporary key used to encrypt a connection following
5040	 * pairing. It is used during the Encrypted Session Setup to
5041	 * distribute the keys. Later, security can be re-established
5042	 * using a distributed LTK.
5043	 */
5044	if (ltk->type == SMP_STK) {
5045		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5046		list_del_rcu(&ltk->list);
5047		kfree_rcu(ltk, rcu);
5048	} else {
5049		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5050	}
5051
5052	hci_dev_unlock(hdev);
5053
5054	return;
5055
5056not_found:
5057	neg.handle = ev->handle;
5058	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5059	hci_dev_unlock(hdev);
5060}
5061
5062static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5063				      u8 reason)
5064{
5065	struct hci_cp_le_conn_param_req_neg_reply cp;
5066
5067	cp.handle = cpu_to_le16(handle);
5068	cp.reason = reason;
5069
5070	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5071		     &cp);
5072}
5073
5074static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5075					     struct sk_buff *skb)
5076{
5077	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5078	struct hci_cp_le_conn_param_req_reply cp;
5079	struct hci_conn *hcon;
5080	u16 handle, min, max, latency, timeout;
5081
5082	handle = le16_to_cpu(ev->handle);
5083	min = le16_to_cpu(ev->interval_min);
5084	max = le16_to_cpu(ev->interval_max);
5085	latency = le16_to_cpu(ev->latency);
5086	timeout = le16_to_cpu(ev->timeout);
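	/* Note (illustrative, per the Core specification): the interval
	 * values are in units of 1.25 ms, the latency is a number of
	 * connection events, and the supervision timeout is in units of
	 * 10 ms.
	 */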
5087
5088	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5089	if (!hcon || hcon->state != BT_CONNECTED)
5090		return send_conn_param_neg_reply(hdev, handle,
5091						 HCI_ERROR_UNKNOWN_CONN_ID);
5092
5093	if (hci_check_conn_params(min, max, latency, timeout))
5094		return send_conn_param_neg_reply(hdev, handle,
5095						 HCI_ERROR_INVALID_LL_PARAMS);
5096
5097	if (hcon->role == HCI_ROLE_MASTER) {
5098		struct hci_conn_params *params;
5099		u8 store_hint;
5100
5101		hci_dev_lock(hdev);
5102
5103		params = hci_conn_params_lookup(hdev, &hcon->dst,
5104						hcon->dst_type);
5105		if (params) {
5106			params->conn_min_interval = min;
5107			params->conn_max_interval = max;
5108			params->conn_latency = latency;
5109			params->supervision_timeout = timeout;
5110			store_hint = 0x01;
5111		} else {
5112			store_hint = 0x00;
5113		}
5114
5115		hci_dev_unlock(hdev);
5116
5117		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5118				    store_hint, min, max, latency, timeout);
5119	}
5120
5121	cp.handle = ev->handle;
5122	cp.interval_min = ev->interval_min;
5123	cp.interval_max = ev->interval_max;
5124	cp.latency = ev->latency;
5125	cp.timeout = ev->timeout;
5126	cp.min_ce_len = 0;
5127	cp.max_ce_len = 0;
5128
5129	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5130}
5131
5132static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5133					 struct sk_buff *skb)
5134{
5135	u8 num_reports = skb->data[0];
5136	void *ptr = &skb->data[1];
5137
5138	hci_dev_lock(hdev);
5139
5140	while (num_reports--) {
5141		struct hci_ev_le_direct_adv_info *ev = ptr;
5142
5143		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5144				   ev->bdaddr_type, &ev->direct_addr,
5145				   ev->direct_addr_type, ev->rssi, NULL, 0);
5146
5147		ptr += sizeof(*ev);
5148	}
5149
5150	hci_dev_unlock(hdev);
5151}
5152
5153static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5154{
5155	struct hci_ev_le_meta *le_ev = (void *) skb->data;
5156
5157	skb_pull(skb, sizeof(*le_ev));
5158
5159	switch (le_ev->subevent) {
5160	case HCI_EV_LE_CONN_COMPLETE:
5161		hci_le_conn_complete_evt(hdev, skb);
5162		break;
5163
5164	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5165		hci_le_conn_update_complete_evt(hdev, skb);
5166		break;
5167
5168	case HCI_EV_LE_ADVERTISING_REPORT:
5169		hci_le_adv_report_evt(hdev, skb);
5170		break;
5171
5172	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5173		hci_le_remote_feat_complete_evt(hdev, skb);
5174		break;
5175
5176	case HCI_EV_LE_LTK_REQ:
5177		hci_le_ltk_request_evt(hdev, skb);
5178		break;
5179
5180	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5181		hci_le_remote_conn_param_req_evt(hdev, skb);
5182		break;
5183
5184	case HCI_EV_LE_DIRECT_ADV_REPORT:
5185		hci_le_direct_adv_report_evt(hdev, skb);
5186		break;
5187
5188	default:
5189		break;
5190	}
5191}
5192
5193static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5194				 u8 event, struct sk_buff *skb)
5195{
5196	struct hci_ev_cmd_complete *ev;
5197	struct hci_event_hdr *hdr;
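	/* Note (illustrative): an HCI event starts with a two byte header
	 * (event code and parameter length); a Command Complete event then
	 * carries the number of allowed command packets followed by the
	 * opcode that completed, which is matched against 'opcode' below.
	 */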
5198
5199	if (!skb)
5200		return false;
5201
5202	if (skb->len < sizeof(*hdr)) {
5203		bt_dev_err(hdev, "too short HCI event");
5204		return false;
5205	}
5206
5207	hdr = (void *) skb->data;
5208	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5209
5210	if (event) {
5211		if (hdr->evt != event)
5212			return false;
5213		return true;
5214	}
5215
5216	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5217		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5218			   hdr->evt);
5219		return false;
5220	}
5221
5222	if (skb->len < sizeof(*ev)) {
5223		bt_dev_err(hdev, "too short cmd_complete event");
5224		return false;
5225	}
5226
5227	ev = (void *) skb->data;
5228	skb_pull(skb, sizeof(*ev));
5229
5230	if (opcode != __le16_to_cpu(ev->opcode)) {
5231		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5232		       __le16_to_cpu(ev->opcode));
5233		return false;
5234	}
5235
5236	return true;
5237}
5238
5239void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5240{
5241	struct hci_event_hdr *hdr = (void *) skb->data;
5242	hci_req_complete_t req_complete = NULL;
5243	hci_req_complete_skb_t req_complete_skb = NULL;
5244	struct sk_buff *orig_skb = NULL;
5245	u8 status = 0, event = hdr->evt, req_evt = 0;
5246	u16 opcode = HCI_OP_NOP;
5247
5248	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5249		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5250		opcode = __le16_to_cpu(cmd_hdr->opcode);
5251		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5252				     &req_complete_skb);
5253		req_evt = event;
5254	}
5255
5256	/* If it looks like we might end up having to call
5257	 * req_complete_skb, store a pristine copy of the skb since the
5258	 * various handlers may modify the original one through
5259	 * skb_pull() calls, etc.
5260	 */
5261	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5262	    event == HCI_EV_CMD_COMPLETE)
5263		orig_skb = skb_clone(skb, GFP_KERNEL);
5264
5265	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5266
5267	switch (event) {
5268	case HCI_EV_INQUIRY_COMPLETE:
5269		hci_inquiry_complete_evt(hdev, skb);
5270		break;
5271
5272	case HCI_EV_INQUIRY_RESULT:
5273		hci_inquiry_result_evt(hdev, skb);
5274		break;
5275
5276	case HCI_EV_CONN_COMPLETE:
5277		hci_conn_complete_evt(hdev, skb);
5278		break;
5279
5280	case HCI_EV_CONN_REQUEST:
5281		hci_conn_request_evt(hdev, skb);
5282		break;
5283
5284	case HCI_EV_DISCONN_COMPLETE:
5285		hci_disconn_complete_evt(hdev, skb);
5286		break;
5287
5288	case HCI_EV_AUTH_COMPLETE:
5289		hci_auth_complete_evt(hdev, skb);
5290		break;
5291
5292	case HCI_EV_REMOTE_NAME:
5293		hci_remote_name_evt(hdev, skb);
5294		break;
5295
5296	case HCI_EV_ENCRYPT_CHANGE:
5297		hci_encrypt_change_evt(hdev, skb);
5298		break;
5299
5300	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5301		hci_change_link_key_complete_evt(hdev, skb);
5302		break;
5303
5304	case HCI_EV_REMOTE_FEATURES:
5305		hci_remote_features_evt(hdev, skb);
5306		break;
5307
5308	case HCI_EV_CMD_COMPLETE:
5309		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5310				     &req_complete, &req_complete_skb);
5311		break;
5312
5313	case HCI_EV_CMD_STATUS:
5314		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5315				   &req_complete_skb);
5316		break;
5317
5318	case HCI_EV_HARDWARE_ERROR:
5319		hci_hardware_error_evt(hdev, skb);
5320		break;
5321
5322	case HCI_EV_ROLE_CHANGE:
5323		hci_role_change_evt(hdev, skb);
5324		break;
5325
5326	case HCI_EV_NUM_COMP_PKTS:
5327		hci_num_comp_pkts_evt(hdev, skb);
5328		break;
5329
5330	case HCI_EV_MODE_CHANGE:
5331		hci_mode_change_evt(hdev, skb);
5332		break;
5333
5334	case HCI_EV_PIN_CODE_REQ:
5335		hci_pin_code_request_evt(hdev, skb);
5336		break;
5337
5338	case HCI_EV_LINK_KEY_REQ:
5339		hci_link_key_request_evt(hdev, skb);
5340		break;
5341
5342	case HCI_EV_LINK_KEY_NOTIFY:
5343		hci_link_key_notify_evt(hdev, skb);
5344		break;
5345
5346	case HCI_EV_CLOCK_OFFSET:
5347		hci_clock_offset_evt(hdev, skb);
5348		break;
5349
5350	case HCI_EV_PKT_TYPE_CHANGE:
5351		hci_pkt_type_change_evt(hdev, skb);
5352		break;
5353
5354	case HCI_EV_PSCAN_REP_MODE:
5355		hci_pscan_rep_mode_evt(hdev, skb);
5356		break;
5357
5358	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5359		hci_inquiry_result_with_rssi_evt(hdev, skb);
5360		break;
5361
5362	case HCI_EV_REMOTE_EXT_FEATURES:
5363		hci_remote_ext_features_evt(hdev, skb);
5364		break;
5365
5366	case HCI_EV_SYNC_CONN_COMPLETE:
5367		hci_sync_conn_complete_evt(hdev, skb);
5368		break;
5369
5370	case HCI_EV_EXTENDED_INQUIRY_RESULT:
5371		hci_extended_inquiry_result_evt(hdev, skb);
5372		break;
5373
5374	case HCI_EV_KEY_REFRESH_COMPLETE:
5375		hci_key_refresh_complete_evt(hdev, skb);
5376		break;
5377
5378	case HCI_EV_IO_CAPA_REQUEST:
5379		hci_io_capa_request_evt(hdev, skb);
5380		break;
5381
5382	case HCI_EV_IO_CAPA_REPLY:
5383		hci_io_capa_reply_evt(hdev, skb);
5384		break;
5385
5386	case HCI_EV_USER_CONFIRM_REQUEST:
5387		hci_user_confirm_request_evt(hdev, skb);
5388		break;
5389
5390	case HCI_EV_USER_PASSKEY_REQUEST:
5391		hci_user_passkey_request_evt(hdev, skb);
5392		break;
5393
5394	case HCI_EV_USER_PASSKEY_NOTIFY:
5395		hci_user_passkey_notify_evt(hdev, skb);
5396		break;
5397
5398	case HCI_EV_KEYPRESS_NOTIFY:
5399		hci_keypress_notify_evt(hdev, skb);
5400		break;
5401
5402	case HCI_EV_SIMPLE_PAIR_COMPLETE:
5403		hci_simple_pair_complete_evt(hdev, skb);
5404		break;
5405
5406	case HCI_EV_REMOTE_HOST_FEATURES:
5407		hci_remote_host_features_evt(hdev, skb);
5408		break;
5409
5410	case HCI_EV_LE_META:
5411		hci_le_meta_evt(hdev, skb);
5412		break;
5413
5414	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5415		hci_remote_oob_data_request_evt(hdev, skb);
5416		break;
5417
5418#if IS_ENABLED(CONFIG_BT_HS)
5419	case HCI_EV_CHANNEL_SELECTED:
5420		hci_chan_selected_evt(hdev, skb);
5421		break;
5422
5423	case HCI_EV_PHY_LINK_COMPLETE:
5424		hci_phy_link_complete_evt(hdev, skb);
5425		break;
5426
5427	case HCI_EV_LOGICAL_LINK_COMPLETE:
5428		hci_loglink_complete_evt(hdev, skb);
5429		break;
5430
5431	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5432		hci_disconn_loglink_complete_evt(hdev, skb);
5433		break;
5434
5435	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5436		hci_disconn_phylink_complete_evt(hdev, skb);
5437		break;
5438#endif
5439
5440	case HCI_EV_NUM_COMP_BLOCKS:
5441		hci_num_comp_blocks_evt(hdev, skb);
5442		break;
5443
5444	default:
5445		BT_DBG("%s event 0x%2.2x", hdev->name, event);
5446		break;
5447	}
5448
5449	if (req_complete) {
5450		req_complete(hdev, status, opcode);
5451	} else if (req_complete_skb) {
5452		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5453			kfree_skb(orig_skb);
5454			orig_skb = NULL;
5455		}
5456		req_complete_skb(hdev, status, opcode, orig_skb);
5457	}
5458
5459	kfree_skb(orig_skb);
5460	kfree_skb(skb);
5461	hdev->stat.evt_rx++;
5462}