   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
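/* Number of sockets currently bound to the monitor channel; while it
 * is zero, hci_send_to_monitor() returns early without cloning frames.
 */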
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u32             cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60};
  61
  62void hci_sock_set_flag(struct sock *sk, int nr)
  63{
  64	set_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67void hci_sock_clear_flag(struct sock *sk, int nr)
  68{
  69	clear_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72int hci_sock_test_flag(struct sock *sk, int nr)
  73{
  74	return test_bit(nr, &hci_pi(sk)->flags);
  75}
  76
  77unsigned short hci_sock_get_channel(struct sock *sk)
  78{
  79	return hci_pi(sk)->channel;
  80}
  81
  82u32 hci_sock_get_cookie(struct sock *sk)
  83{
  84	return hci_pi(sk)->cookie;
  85}
  86
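/* Assign a unique non-zero cookie on first use and record the task
 * name for monitor traces. Returns true only when a new cookie was
 * generated, so callers can emit the open notification exactly once.
 */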
  87static bool hci_sock_gen_cookie(struct sock *sk)
  88{
  89	int id = hci_pi(sk)->cookie;
  90
  91	if (!id) {
  92		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
  93		if (id < 0)
  94			id = 0xffffffff;
  95
  96		hci_pi(sk)->cookie = id;
  97		get_task_comm(hci_pi(sk)->comm, current);
  98		return true;
  99	}
 100
 101	return false;
 102}
 103
 104static void hci_sock_free_cookie(struct sock *sk)
 105{
 106	int id = hci_pi(sk)->cookie;
 107
 108	if (id) {
 109		hci_pi(sk)->cookie = 0xffffffff;
 110		ida_simple_remove(&sock_cookie_ida, id);
 111	}
 112}
 113
 114static inline int hci_test_bit(int nr, const void *addr)
 115{
 116	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 117}
 118
 119/* Security filter */
 120#define HCI_SFLT_MAX_OGF  5
 121
 122struct hci_sec_filter {
 123	__u32 type_mask;
 124	__u32 event_mask[2];
 125	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 126};
 127
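/* Bitmaps of the packet types, events and per-OGF command opcodes that
 * unprivileged raw sockets may use; anything outside these masks
 * requires CAP_NET_RAW (see hci_sock_sendmsg() and HCI_FILTER below).
 */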
 128static const struct hci_sec_filter hci_sec_filter = {
 129	/* Packet types */
 130	0x10,
 131	/* Events */
 132	{ 0x1000d9fe, 0x0000b00c },
 133	/* Commands */
 134	{
 135		{ 0x0 },
 136		/* OGF_LINK_CTL */
 137		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 138		/* OGF_LINK_POLICY */
 139		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 140		/* OGF_HOST_CTL */
 141		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 142		/* OGF_INFO_PARAM */
 143		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 144		/* OGF_STATUS_PARAM */
 145		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 146	}
 147};
 148
 149static struct bt_sock_list hci_sk_list = {
 150	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 151};
 152
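/* Returns true when the socket's filter rejects this packet: the
 * packet type and, for events, the event code must be set in the
 * filter masks, and a configured opcode must match the opcode found
 * in Command Complete/Command Status events.
 */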
 153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 154{
 155	struct hci_filter *flt;
 156	int flt_type, flt_event;
 157
 158	/* Apply filter */
 159	flt = &hci_pi(sk)->filter;
 160
 161	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 162
 163	if (!test_bit(flt_type, &flt->type_mask))
 164		return true;
 165
 166	/* Extra filter for event packets only */
 167	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 168		return false;
 169
 170	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 171
 172	if (!hci_test_bit(flt_event, &flt->event_mask))
 173		return true;
 174
 175	/* Check filter only when opcode is set */
 176	if (!flt->opcode)
 177		return false;
 178
 179	if (flt_event == HCI_EV_CMD_COMPLETE &&
 180	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 181		return true;
 182
 183	if (flt_event == HCI_EV_CMD_STATUS &&
 184	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 185		return true;
 186
 187	return false;
 188}
 189
 190/* Send frame to RAW socket */
 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 192{
 193	struct sock *sk;
 194	struct sk_buff *skb_copy = NULL;
 195
 196	BT_DBG("hdev %p len %d", hdev, skb->len);
 197
 198	read_lock(&hci_sk_list.lock);
 199
 200	sk_for_each(sk, &hci_sk_list.head) {
 201		struct sk_buff *nskb;
 202
 203		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 204			continue;
 205
 206		/* Don't send frame to the socket it came from */
 207		if (skb->sk == sk)
 208			continue;
 209
 210		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 211			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 212			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 213			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 214			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 215				continue;
 216			if (is_filtered_packet(sk, skb))
 217				continue;
 218		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 219			if (!bt_cb(skb)->incoming)
 220				continue;
 221			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 222			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 224				continue;
 225		} else {
 226			/* Don't send frame to other channel types */
 227			continue;
 228		}
 229
 230		if (!skb_copy) {
 231			/* Create a private copy with headroom */
 232			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 233			if (!skb_copy)
 234				continue;
 235
 236			/* Put type byte before the data */
 237			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 238		}
 239
 240		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 241		if (!nskb)
 242			continue;
 243
 244		if (sock_queue_rcv_skb(sk, nskb))
 245			kfree_skb(nskb);
 246	}
 247
 248	read_unlock(&hci_sk_list.lock);
 249
 250	kfree_skb(skb_copy);
 251}
 252
 253/* Send frame to sockets with specific channel */
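/* The caller must hold hci_sk_list.lock; hci_send_to_channel() below
 * wraps this helper with the read lock taken.
 */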
 254static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 255				  int flag, struct sock *skip_sk)
 256{
 257	struct sock *sk;
 258
 259	BT_DBG("channel %u len %d", channel, skb->len);
 260
 261	sk_for_each(sk, &hci_sk_list.head) {
 262		struct sk_buff *nskb;
 263
 264		/* Ignore socket without the flag set */
 265		if (!hci_sock_test_flag(sk, flag))
 266			continue;
 267
 268		/* Skip the original socket */
 269		if (sk == skip_sk)
 270			continue;
 271
 272		if (sk->sk_state != BT_BOUND)
 273			continue;
 274
 275		if (hci_pi(sk)->channel != channel)
 276			continue;
 277
 278		nskb = skb_clone(skb, GFP_ATOMIC);
 279		if (!nskb)
 280			continue;
 281
 282		if (sock_queue_rcv_skb(sk, nskb))
 283			kfree_skb(nskb);
 284	}
 285
 286}
 287
 288void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 289			 int flag, struct sock *skip_sk)
 290{
 291	read_lock(&hci_sk_list.lock);
 292	__hci_send_to_channel(channel, skb, flag, skip_sk);
 293	read_unlock(&hci_sk_list.lock);
 294}
 295
 296/* Send frame to monitor socket */
 297void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 298{
 299	struct sk_buff *skb_copy = NULL;
 300	struct hci_mon_hdr *hdr;
 301	__le16 opcode;
 302
 303	if (!atomic_read(&monitor_promisc))
 304		return;
 305
 306	BT_DBG("hdev %p len %d", hdev, skb->len);
 307
 308	switch (hci_skb_pkt_type(skb)) {
 309	case HCI_COMMAND_PKT:
 310		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 311		break;
 312	case HCI_EVENT_PKT:
 313		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 314		break;
 315	case HCI_ACLDATA_PKT:
 316		if (bt_cb(skb)->incoming)
 317			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 318		else
 319			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 320		break;
 321	case HCI_SCODATA_PKT:
 322		if (bt_cb(skb)->incoming)
 323			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 324		else
 325			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 326		break;
 327	case HCI_DIAG_PKT:
 328		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 329		break;
 330	default:
 331		return;
 332	}
 333
 334	/* Create a private copy with headroom */
 335	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 336	if (!skb_copy)
 337		return;
 338
 339	/* Put header before the data */
 340	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 341	hdr->opcode = opcode;
 342	hdr->index = cpu_to_le16(hdev->id);
 343	hdr->len = cpu_to_le16(skb->len);
 344
 345	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 346			    HCI_SOCK_TRUSTED, NULL);
 347	kfree_skb(skb_copy);
 348}
 349
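/* Mirror a management event onto the monitor channel: for every bound
 * control socket that matches @flag, build an HCI_MON_CTRL_EVENT frame
 * carrying that socket's cookie, the event code and the payload.
 */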
 350void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 351				 void *data, u16 data_len, ktime_t tstamp,
 352				 int flag, struct sock *skip_sk)
 353{
 354	struct sock *sk;
 355	__le16 index;
 356
 357	if (hdev)
 358		index = cpu_to_le16(hdev->id);
 359	else
 360		index = cpu_to_le16(MGMT_INDEX_NONE);
 361
 362	read_lock(&hci_sk_list.lock);
 363
 364	sk_for_each(sk, &hci_sk_list.head) {
 365		struct hci_mon_hdr *hdr;
 366		struct sk_buff *skb;
 367
 368		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 369			continue;
 370
 371		/* Ignore socket without the flag set */
 372		if (!hci_sock_test_flag(sk, flag))
 373			continue;
 374
 375		/* Skip the original socket */
 376		if (sk == skip_sk)
 377			continue;
 378
 379		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 380		if (!skb)
 381			continue;
 382
 383		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 384		put_unaligned_le16(event, skb_put(skb, 2));
 385
 386		if (data)
 387			skb_put_data(skb, data, data_len);
 388
 389		skb->tstamp = tstamp;
 390
 391		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 392		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 393		hdr->index = index;
 394		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 395
 396		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 397				      HCI_SOCK_TRUSTED, NULL);
 398		kfree_skb(skb);
 399	}
 400
 401	read_unlock(&hci_sk_list.lock);
 402}
 403
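/* Translate an HCI_DEV_* core event into its monitor frame (new/del
 * index, index info, open/close index), or return NULL when the event
 * has no monitor representation.
 */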
 404static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 405{
 406	struct hci_mon_hdr *hdr;
 407	struct hci_mon_new_index *ni;
 408	struct hci_mon_index_info *ii;
 409	struct sk_buff *skb;
 410	__le16 opcode;
 411
 412	switch (event) {
 413	case HCI_DEV_REG:
 414		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 415		if (!skb)
 416			return NULL;
 417
 418		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 419		ni->type = hdev->dev_type;
 420		ni->bus = hdev->bus;
 421		bacpy(&ni->bdaddr, &hdev->bdaddr);
 422		memcpy(ni->name, hdev->name, 8);
 423
 424		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 425		break;
 426
 427	case HCI_DEV_UNREG:
 428		skb = bt_skb_alloc(0, GFP_ATOMIC);
 429		if (!skb)
 430			return NULL;
 431
 432		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 433		break;
 434
 435	case HCI_DEV_SETUP:
 436		if (hdev->manufacturer == 0xffff)
 437			return NULL;
 438
 439		/* fall through */
 440
 441	case HCI_DEV_UP:
 442		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 443		if (!skb)
 444			return NULL;
 445
 446		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 447		bacpy(&ii->bdaddr, &hdev->bdaddr);
 448		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 449
 450		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 451		break;
 452
 453	case HCI_DEV_OPEN:
 454		skb = bt_skb_alloc(0, GFP_ATOMIC);
 455		if (!skb)
 456			return NULL;
 457
 458		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 459		break;
 460
 461	case HCI_DEV_CLOSE:
 462		skb = bt_skb_alloc(0, GFP_ATOMIC);
 463		if (!skb)
 464			return NULL;
 465
 466		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 467		break;
 468
 469	default:
 470		return NULL;
 471	}
 472
 473	__net_timestamp(skb);
 474
 475	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 476	hdr->opcode = opcode;
 477	hdr->index = cpu_to_le16(hdev->id);
 478	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 479
 480	return skb;
 481}
 482
 483static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 484{
 485	struct hci_mon_hdr *hdr;
 486	struct sk_buff *skb;
 487	u16 format;
 488	u8 ver[3];
 489	u32 flags;
 490
 491	/* No message needed when cookie is not present */
 492	if (!hci_pi(sk)->cookie)
 493		return NULL;
 494
 495	switch (hci_pi(sk)->channel) {
 496	case HCI_CHANNEL_RAW:
 497		format = 0x0000;
 498		ver[0] = BT_SUBSYS_VERSION;
 499		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 500		break;
 501	case HCI_CHANNEL_USER:
 502		format = 0x0001;
 503		ver[0] = BT_SUBSYS_VERSION;
 504		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 505		break;
 506	case HCI_CHANNEL_CONTROL:
 507		format = 0x0002;
 508		mgmt_fill_version_info(ver);
 509		break;
 510	default:
 511		/* No message for unsupported format */
 512		return NULL;
 513	}
 514
  515	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 516	if (!skb)
 517		return NULL;
 518
 519	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 520
 521	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 522	put_unaligned_le16(format, skb_put(skb, 2));
 523	skb_put_data(skb, ver, sizeof(ver));
 524	put_unaligned_le32(flags, skb_put(skb, 4));
 525	skb_put_u8(skb, TASK_COMM_LEN);
 526	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
 527
 528	__net_timestamp(skb);
 529
 530	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 531	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 532	if (hci_pi(sk)->hdev)
 533		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 534	else
 535		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 536	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 537
 538	return skb;
 539}
 540
 541static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 542{
 543	struct hci_mon_hdr *hdr;
 544	struct sk_buff *skb;
 545
 546	/* No message needed when cookie is not present */
 547	if (!hci_pi(sk)->cookie)
 548		return NULL;
 549
 550	switch (hci_pi(sk)->channel) {
 551	case HCI_CHANNEL_RAW:
 552	case HCI_CHANNEL_USER:
 553	case HCI_CHANNEL_CONTROL:
 554		break;
 555	default:
 556		/* No message for unsupported format */
 557		return NULL;
 558	}
 559
 560	skb = bt_skb_alloc(4, GFP_ATOMIC);
 561	if (!skb)
 562		return NULL;
 563
 564	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 565
 566	__net_timestamp(skb);
 567
 568	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 569	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 570	if (hci_pi(sk)->hdev)
 571		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 572	else
 573		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 574	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 575
 576	return skb;
 577}
 578
 579static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 580						   u16 opcode, u16 len,
 581						   const void *buf)
 582{
 583	struct hci_mon_hdr *hdr;
 584	struct sk_buff *skb;
 585
 586	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 587	if (!skb)
 588		return NULL;
 589
 590	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 591	put_unaligned_le16(opcode, skb_put(skb, 2));
 592
 593	if (buf)
 594		skb_put_data(skb, buf, len);
 595
 596	__net_timestamp(skb);
 597
 598	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 599	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 600	hdr->index = cpu_to_le16(index);
 601	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 602
 603	return skb;
 604}
 605
 606static void __printf(2, 3)
 607send_monitor_note(struct sock *sk, const char *fmt, ...)
 608{
 609	size_t len;
 610	struct hci_mon_hdr *hdr;
 611	struct sk_buff *skb;
 612	va_list args;
 613
 614	va_start(args, fmt);
 615	len = vsnprintf(NULL, 0, fmt, args);
 616	va_end(args);
 617
 618	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 619	if (!skb)
 620		return;
 621
 622	va_start(args, fmt);
 623	vsprintf(skb_put(skb, len), fmt, args);
 624	*(u8 *)skb_put(skb, 1) = 0;
 625	va_end(args);
 626
 627	__net_timestamp(skb);
 628
 629	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 630	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 631	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 632	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 633
 634	if (sock_queue_rcv_skb(sk, skb))
 635		kfree_skb(skb);
 636}
 637
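/* Replay the register/open/up state of all controllers to a freshly
 * bound monitor socket so that it does not miss earlier transitions.
 */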
 638static void send_monitor_replay(struct sock *sk)
 639{
 640	struct hci_dev *hdev;
 641
 642	read_lock(&hci_dev_list_lock);
 643
 644	list_for_each_entry(hdev, &hci_dev_list, list) {
 645		struct sk_buff *skb;
 646
 647		skb = create_monitor_event(hdev, HCI_DEV_REG);
 648		if (!skb)
 649			continue;
 650
 651		if (sock_queue_rcv_skb(sk, skb))
 652			kfree_skb(skb);
 653
 654		if (!test_bit(HCI_RUNNING, &hdev->flags))
 655			continue;
 656
 657		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 658		if (!skb)
 659			continue;
 660
 661		if (sock_queue_rcv_skb(sk, skb))
 662			kfree_skb(skb);
 663
 664		if (test_bit(HCI_UP, &hdev->flags))
 665			skb = create_monitor_event(hdev, HCI_DEV_UP);
 666		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 667			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 668		else
 669			skb = NULL;
 670
 671		if (skb) {
 672			if (sock_queue_rcv_skb(sk, skb))
 673				kfree_skb(skb);
 674		}
 675	}
 676
 677	read_unlock(&hci_dev_list_lock);
 678}
 679
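/* Likewise replay an open notification for every existing socket that
 * already carries a cookie, so the new monitor sees all active clients.
 */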
 680static void send_monitor_control_replay(struct sock *mon_sk)
 681{
 682	struct sock *sk;
 683
 684	read_lock(&hci_sk_list.lock);
 685
 686	sk_for_each(sk, &hci_sk_list.head) {
 687		struct sk_buff *skb;
 688
 689		skb = create_monitor_ctrl_open(sk);
 690		if (!skb)
 691			continue;
 692
 693		if (sock_queue_rcv_skb(mon_sk, skb))
 694			kfree_skb(skb);
 695	}
 696
 697	read_unlock(&hci_sk_list.lock);
 698}
 699
 700/* Generate internal stack event */
 701static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 702{
 703	struct hci_event_hdr *hdr;
 704	struct hci_ev_stack_internal *ev;
 705	struct sk_buff *skb;
 706
 707	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 708	if (!skb)
 709		return;
 710
 711	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 712	hdr->evt  = HCI_EV_STACK_INTERNAL;
 713	hdr->plen = sizeof(*ev) + dlen;
 714
 715	ev = skb_put(skb, sizeof(*ev) + dlen);
 716	ev->type = type;
 717	memcpy(ev->data, data, dlen);
 718
 719	bt_cb(skb)->incoming = 1;
 720	__net_timestamp(skb);
 721
 722	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 723	hci_send_to_sock(hdev, skb);
 724	kfree_skb(skb);
 725}
 726
 727void hci_sock_dev_event(struct hci_dev *hdev, int event)
 728{
 729	BT_DBG("hdev %s event %d", hdev->name, event);
 730
 731	if (atomic_read(&monitor_promisc)) {
 732		struct sk_buff *skb;
 733
 734		/* Send event to monitor */
 735		skb = create_monitor_event(hdev, event);
 736		if (skb) {
 737			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 738					    HCI_SOCK_TRUSTED, NULL);
 739			kfree_skb(skb);
 740		}
 741	}
 742
 743	if (event <= HCI_DEV_DOWN) {
 744		struct hci_ev_si_device ev;
 745
 746		/* Send event to sockets */
 747		ev.event  = event;
 748		ev.dev_id = hdev->id;
 749		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 750	}
 751
 752	if (event == HCI_DEV_UNREG) {
 753		struct sock *sk;
 754
 755		/* Detach sockets from device */
 756		read_lock(&hci_sk_list.lock);
 757		sk_for_each(sk, &hci_sk_list.head) {
 758			bh_lock_sock_nested(sk);
 759			if (hci_pi(sk)->hdev == hdev) {
 760				hci_pi(sk)->hdev = NULL;
 761				sk->sk_err = EPIPE;
 762				sk->sk_state = BT_OPEN;
 763				sk->sk_state_change(sk);
 764
 765				hci_dev_put(hdev);
 766			}
 767			bh_unlock_sock(sk);
 768		}
 769		read_unlock(&hci_sk_list.lock);
 770	}
 771}
 772
 773static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 774{
 775	struct hci_mgmt_chan *c;
 776
 777	list_for_each_entry(c, &mgmt_chan_list, list) {
 778		if (c->channel == channel)
 779			return c;
 780	}
 781
 782	return NULL;
 783}
 784
 785static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 786{
 787	struct hci_mgmt_chan *c;
 788
 789	mutex_lock(&mgmt_chan_list_lock);
 790	c = __hci_mgmt_chan_find(channel);
 791	mutex_unlock(&mgmt_chan_list_lock);
 792
 793	return c;
 794}
 795
 796int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 797{
 798	if (c->channel < HCI_CHANNEL_CONTROL)
 799		return -EINVAL;
 800
 801	mutex_lock(&mgmt_chan_list_lock);
 802	if (__hci_mgmt_chan_find(c->channel)) {
 803		mutex_unlock(&mgmt_chan_list_lock);
 804		return -EALREADY;
 805	}
 806
 807	list_add_tail(&c->list, &mgmt_chan_list);
 808
 809	mutex_unlock(&mgmt_chan_list_lock);
 810
 811	return 0;
 812}
 813EXPORT_SYMBOL(hci_mgmt_chan_register);
 814
 815void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 816{
 817	mutex_lock(&mgmt_chan_list_lock);
 818	list_del(&c->list);
 819	mutex_unlock(&mgmt_chan_list_lock);
 820}
 821EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 822
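/* Socket teardown: notify the monitor, release the cookie, unlink the
 * socket and, for the user channel, close the controller and hand it
 * back to the management interface.
 */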
 823static int hci_sock_release(struct socket *sock)
 824{
 825	struct sock *sk = sock->sk;
 826	struct hci_dev *hdev;
 827	struct sk_buff *skb;
 828
 829	BT_DBG("sock %p sk %p", sock, sk);
 830
 831	if (!sk)
 832		return 0;
 833
 834	switch (hci_pi(sk)->channel) {
 835	case HCI_CHANNEL_MONITOR:
 836		atomic_dec(&monitor_promisc);
 837		break;
 838	case HCI_CHANNEL_RAW:
 839	case HCI_CHANNEL_USER:
 840	case HCI_CHANNEL_CONTROL:
 841		/* Send event to monitor */
 842		skb = create_monitor_ctrl_close(sk);
 843		if (skb) {
 844			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 845					    HCI_SOCK_TRUSTED, NULL);
 846			kfree_skb(skb);
 847		}
 848
 849		hci_sock_free_cookie(sk);
 850		break;
 851	}
 852
 853	bt_sock_unlink(&hci_sk_list, sk);
 854
 855	hdev = hci_pi(sk)->hdev;
 856	if (hdev) {
 857		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 858			/* When releasing a user channel exclusive access,
 859			 * call hci_dev_do_close directly instead of calling
 860			 * hci_dev_close to ensure the exclusive access will
 861			 * be released and the controller brought back down.
 862			 *
 863			 * The checking of HCI_AUTO_OFF is not needed in this
 864			 * case since it will have been cleared already when
 865			 * opening the user channel.
 866			 */
 867			hci_dev_do_close(hdev);
 868			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 869			mgmt_index_added(hdev);
 870		}
 871
 872		atomic_dec(&hdev->promisc);
 873		hci_dev_put(hdev);
 874	}
 875
 876	sock_orphan(sk);
 877
 878	skb_queue_purge(&sk->sk_receive_queue);
 879	skb_queue_purge(&sk->sk_write_queue);
 880
 881	sock_put(sk);
 882	return 0;
 883}
 884
 885static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 886{
 887	bdaddr_t bdaddr;
 888	int err;
 889
 890	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 891		return -EFAULT;
 892
 893	hci_dev_lock(hdev);
 894
 895	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 896
 897	hci_dev_unlock(hdev);
 898
 899	return err;
 900}
 901
 902static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 903{
 904	bdaddr_t bdaddr;
 905	int err;
 906
 907	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 908		return -EFAULT;
 909
 910	hci_dev_lock(hdev);
 911
 912	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 913
 914	hci_dev_unlock(hdev);
 915
 916	return err;
 917}
 918
 919/* Ioctls that require bound socket */
 920static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 921				unsigned long arg)
 922{
 923	struct hci_dev *hdev = hci_pi(sk)->hdev;
 924
 925	if (!hdev)
 926		return -EBADFD;
 927
 928	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 929		return -EBUSY;
 930
 931	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 932		return -EOPNOTSUPP;
 933
 934	if (hdev->dev_type != HCI_PRIMARY)
 935		return -EOPNOTSUPP;
 936
 937	switch (cmd) {
 938	case HCISETRAW:
 939		if (!capable(CAP_NET_ADMIN))
 940			return -EPERM;
 941		return -EOPNOTSUPP;
 942
 943	case HCIGETCONNINFO:
 944		return hci_get_conn_info(hdev, (void __user *)arg);
 945
 946	case HCIGETAUTHINFO:
 947		return hci_get_auth_info(hdev, (void __user *)arg);
 948
 949	case HCIBLOCKADDR:
 950		if (!capable(CAP_NET_ADMIN))
 951			return -EPERM;
 952		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 953
 954	case HCIUNBLOCKADDR:
 955		if (!capable(CAP_NET_ADMIN))
 956			return -EPERM;
 957		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 958	}
 959
 960	return -ENOIOCTLCMD;
 961}
 962
 963static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 964			  unsigned long arg)
 965{
 966	void __user *argp = (void __user *)arg;
 967	struct sock *sk = sock->sk;
 968	int err;
 969
 970	BT_DBG("cmd %x arg %lx", cmd, arg);
 971
 972	lock_sock(sk);
 973
 974	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 975		err = -EBADFD;
 976		goto done;
 977	}
 978
  979	/* When calling an ioctl on an unbound raw socket, ensure that
  980	 * the monitor gets informed. Ensure that the resulting event is
  981	 * only sent once by checking whether the cookie already exists;
  982	 * the socket cookie is only ever generated once for the lifetime
  983	 * of a given socket.
 984	 */
 985	if (hci_sock_gen_cookie(sk)) {
 986		struct sk_buff *skb;
 987
 988		if (capable(CAP_NET_ADMIN))
 989			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 990
 991		/* Send event to monitor */
 992		skb = create_monitor_ctrl_open(sk);
 993		if (skb) {
 994			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 995					    HCI_SOCK_TRUSTED, NULL);
 996			kfree_skb(skb);
 997		}
 998	}
 999
1000	release_sock(sk);
1001
1002	switch (cmd) {
1003	case HCIGETDEVLIST:
1004		return hci_get_dev_list(argp);
1005
1006	case HCIGETDEVINFO:
1007		return hci_get_dev_info(argp);
1008
1009	case HCIGETCONNLIST:
1010		return hci_get_conn_list(argp);
1011
1012	case HCIDEVUP:
1013		if (!capable(CAP_NET_ADMIN))
1014			return -EPERM;
1015		return hci_dev_open(arg);
1016
1017	case HCIDEVDOWN:
1018		if (!capable(CAP_NET_ADMIN))
1019			return -EPERM;
1020		return hci_dev_close(arg);
1021
1022	case HCIDEVRESET:
1023		if (!capable(CAP_NET_ADMIN))
1024			return -EPERM;
1025		return hci_dev_reset(arg);
1026
1027	case HCIDEVRESTAT:
1028		if (!capable(CAP_NET_ADMIN))
1029			return -EPERM;
1030		return hci_dev_reset_stat(arg);
1031
1032	case HCISETSCAN:
1033	case HCISETAUTH:
1034	case HCISETENCRYPT:
1035	case HCISETPTYPE:
1036	case HCISETLINKPOL:
1037	case HCISETLINKMODE:
1038	case HCISETACLMTU:
1039	case HCISETSCOMTU:
1040		if (!capable(CAP_NET_ADMIN))
1041			return -EPERM;
1042		return hci_dev_cmd(cmd, argp);
1043
1044	case HCIINQUIRY:
1045		return hci_inquiry(argp);
1046	}
1047
1048	lock_sock(sk);
1049
1050	err = hci_sock_bound_ioctl(sk, cmd, arg);
1051
1052done:
1053	release_sock(sk);
1054	return err;
1055}
1056
1057static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1058			 int addr_len)
1059{
1060	struct sockaddr_hci haddr;
1061	struct sock *sk = sock->sk;
1062	struct hci_dev *hdev = NULL;
1063	struct sk_buff *skb;
1064	int len, err = 0;
1065
1066	BT_DBG("sock %p sk %p", sock, sk);
1067
1068	if (!addr)
1069		return -EINVAL;
1070
1071	memset(&haddr, 0, sizeof(haddr));
1072	len = min_t(unsigned int, sizeof(haddr), addr_len);
1073	memcpy(&haddr, addr, len);
1074
1075	if (haddr.hci_family != AF_BLUETOOTH)
1076		return -EINVAL;
1077
1078	lock_sock(sk);
1079
1080	if (sk->sk_state == BT_BOUND) {
1081		err = -EALREADY;
1082		goto done;
1083	}
1084
1085	switch (haddr.hci_channel) {
1086	case HCI_CHANNEL_RAW:
1087		if (hci_pi(sk)->hdev) {
1088			err = -EALREADY;
1089			goto done;
1090		}
1091
1092		if (haddr.hci_dev != HCI_DEV_NONE) {
1093			hdev = hci_dev_get(haddr.hci_dev);
1094			if (!hdev) {
1095				err = -ENODEV;
1096				goto done;
1097			}
1098
1099			atomic_inc(&hdev->promisc);
1100		}
1101
1102		hci_pi(sk)->channel = haddr.hci_channel;
1103
1104		if (!hci_sock_gen_cookie(sk)) {
 1105		/* If a cookie has already been assigned, then an ioctl
 1106		 * has already been issued against an unbound socket and
 1107		 * with that triggered an open notification. Send a close
 1108		 * notification first to allow the clean transition to the
 1109		 * bound state.
1110			 */
1111			skb = create_monitor_ctrl_close(sk);
1112			if (skb) {
1113				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1114						    HCI_SOCK_TRUSTED, NULL);
1115				kfree_skb(skb);
1116			}
1117		}
1118
1119		if (capable(CAP_NET_ADMIN))
1120			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1121
1122		hci_pi(sk)->hdev = hdev;
1123
1124		/* Send event to monitor */
1125		skb = create_monitor_ctrl_open(sk);
1126		if (skb) {
1127			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1128					    HCI_SOCK_TRUSTED, NULL);
1129			kfree_skb(skb);
1130		}
1131		break;
1132
1133	case HCI_CHANNEL_USER:
1134		if (hci_pi(sk)->hdev) {
1135			err = -EALREADY;
1136			goto done;
1137		}
1138
1139		if (haddr.hci_dev == HCI_DEV_NONE) {
1140			err = -EINVAL;
1141			goto done;
1142		}
1143
1144		if (!capable(CAP_NET_ADMIN)) {
1145			err = -EPERM;
1146			goto done;
1147		}
1148
1149		hdev = hci_dev_get(haddr.hci_dev);
1150		if (!hdev) {
1151			err = -ENODEV;
1152			goto done;
1153		}
1154
1155		if (test_bit(HCI_INIT, &hdev->flags) ||
1156		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1157		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1158		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1159		     test_bit(HCI_UP, &hdev->flags))) {
1160			err = -EBUSY;
1161			hci_dev_put(hdev);
1162			goto done;
1163		}
1164
1165		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1166			err = -EUSERS;
1167			hci_dev_put(hdev);
1168			goto done;
1169		}
1170
1171		mgmt_index_removed(hdev);
1172
1173		err = hci_dev_open(hdev->id);
1174		if (err) {
1175			if (err == -EALREADY) {
1176				/* In case the transport is already up and
1177				 * running, clear the error here.
1178				 *
1179				 * This can happen when opening a user
1180				 * channel and HCI_AUTO_OFF grace period
1181				 * is still active.
1182				 */
1183				err = 0;
1184			} else {
1185				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1186				mgmt_index_added(hdev);
1187				hci_dev_put(hdev);
1188				goto done;
1189			}
1190		}
1191
1192		hci_pi(sk)->channel = haddr.hci_channel;
1193
1194		if (!hci_sock_gen_cookie(sk)) {
1195			/* In the case when a cookie has already been assigned,
1196			 * this socket will transition from a raw socket into
1197			 * a user channel socket. For a clean transition, send
1198			 * the close notification first.
1199			 */
1200			skb = create_monitor_ctrl_close(sk);
1201			if (skb) {
1202				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1203						    HCI_SOCK_TRUSTED, NULL);
1204				kfree_skb(skb);
1205			}
1206		}
1207
1208		/* The user channel is restricted to CAP_NET_ADMIN
1209		 * capabilities and with that implicitly trusted.
1210		 */
1211		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1212
1213		hci_pi(sk)->hdev = hdev;
1214
1215		/* Send event to monitor */
1216		skb = create_monitor_ctrl_open(sk);
1217		if (skb) {
1218			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1219					    HCI_SOCK_TRUSTED, NULL);
1220			kfree_skb(skb);
1221		}
1222
1223		atomic_inc(&hdev->promisc);
1224		break;
1225
1226	case HCI_CHANNEL_MONITOR:
1227		if (haddr.hci_dev != HCI_DEV_NONE) {
1228			err = -EINVAL;
1229			goto done;
1230		}
1231
1232		if (!capable(CAP_NET_RAW)) {
1233			err = -EPERM;
1234			goto done;
1235		}
1236
1237		hci_pi(sk)->channel = haddr.hci_channel;
1238
1239		/* The monitor interface is restricted to CAP_NET_RAW
1240		 * capabilities and with that implicitly trusted.
1241		 */
1242		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1243
1244		send_monitor_note(sk, "Linux version %s (%s)",
1245				  init_utsname()->release,
1246				  init_utsname()->machine);
1247		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1248				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1249		send_monitor_replay(sk);
1250		send_monitor_control_replay(sk);
1251
1252		atomic_inc(&monitor_promisc);
1253		break;
1254
1255	case HCI_CHANNEL_LOGGING:
1256		if (haddr.hci_dev != HCI_DEV_NONE) {
1257			err = -EINVAL;
1258			goto done;
1259		}
1260
1261		if (!capable(CAP_NET_ADMIN)) {
1262			err = -EPERM;
1263			goto done;
1264		}
1265
1266		hci_pi(sk)->channel = haddr.hci_channel;
1267		break;
1268
1269	default:
1270		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1271			err = -EINVAL;
1272			goto done;
1273		}
1274
1275		if (haddr.hci_dev != HCI_DEV_NONE) {
1276			err = -EINVAL;
1277			goto done;
1278		}
1279
1280		/* Users with CAP_NET_ADMIN capabilities are allowed
1281		 * access to all management commands and events. For
1282		 * untrusted users the interface is restricted and
 1283		 * only events flagged as safe for untrusted users are sent.
1284		 */
1285		if (capable(CAP_NET_ADMIN))
1286			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1287
1288		hci_pi(sk)->channel = haddr.hci_channel;
1289
1290		/* At the moment the index and unconfigured index events
1291		 * are enabled unconditionally. Setting them on each
1292		 * socket when binding keeps this functionality. They
 1293		 * socket when binding keeps this functionality. However,
 1294		 * they might be cleared later, and then sending of these
1295		 *
1296		 * This also enables generic events that are safe to be
1297		 * received by untrusted users. Example for such events
1298		 * are changes to settings, class of device, name etc.
1299		 */
1300		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1301			if (!hci_sock_gen_cookie(sk)) {
1302				/* In the case when a cookie has already been
 1303				 * assigned, this socket will transition from
 1304				 * a raw socket into a control socket. To
 1305				 * allow for a clean transition, send the
1306				 * close notification first.
1307				 */
1308				skb = create_monitor_ctrl_close(sk);
1309				if (skb) {
1310					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1311							    HCI_SOCK_TRUSTED, NULL);
1312					kfree_skb(skb);
1313				}
1314			}
1315
1316			/* Send event to monitor */
1317			skb = create_monitor_ctrl_open(sk);
1318			if (skb) {
1319				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1320						    HCI_SOCK_TRUSTED, NULL);
1321				kfree_skb(skb);
1322			}
1323
1324			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1325			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1326			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1327			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1328			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1329			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1330		}
1331		break;
1332	}
1333
1334	sk->sk_state = BT_BOUND;
1335
1336done:
1337	release_sock(sk);
1338	return err;
1339}
1340
1341static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1342			    int peer)
1343{
1344	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1345	struct sock *sk = sock->sk;
1346	struct hci_dev *hdev;
1347	int err = 0;
1348
1349	BT_DBG("sock %p sk %p", sock, sk);
1350
1351	if (peer)
1352		return -EOPNOTSUPP;
1353
1354	lock_sock(sk);
1355
1356	hdev = hci_pi(sk)->hdev;
1357	if (!hdev) {
1358		err = -EBADFD;
1359		goto done;
1360	}
1361
1362	haddr->hci_family = AF_BLUETOOTH;
1363	haddr->hci_dev    = hdev->id;
 1364	haddr->hci_channel = hci_pi(sk)->channel;
1365	err = sizeof(*haddr);
1366
1367done:
1368	release_sock(sk);
1369	return err;
1370}
1371
1372static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1373			  struct sk_buff *skb)
1374{
1375	__u32 mask = hci_pi(sk)->cmsg_mask;
1376
1377	if (mask & HCI_CMSG_DIR) {
1378		int incoming = bt_cb(skb)->incoming;
1379		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1380			 &incoming);
1381	}
1382
1383	if (mask & HCI_CMSG_TSTAMP) {
1384#ifdef CONFIG_COMPAT
1385		struct old_timeval32 ctv;
1386#endif
1387		struct __kernel_old_timeval tv;
1388		void *data;
1389		int len;
1390
1391		skb_get_timestamp(skb, &tv);
1392
1393		data = &tv;
1394		len = sizeof(tv);
1395#ifdef CONFIG_COMPAT
1396		if (!COMPAT_USE_64BIT_TIME &&
1397		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1398			ctv.tv_sec = tv.tv_sec;
1399			ctv.tv_usec = tv.tv_usec;
1400			data = &ctv;
1401			len = sizeof(ctv);
1402		}
1403#endif
1404
1405		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1406	}
1407}
1408
1409static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1410			    size_t len, int flags)
1411{
1412	int noblock = flags & MSG_DONTWAIT;
1413	struct sock *sk = sock->sk;
1414	struct sk_buff *skb;
1415	int copied, err;
1416	unsigned int skblen;
1417
1418	BT_DBG("sock %p, sk %p", sock, sk);
1419
1420	if (flags & MSG_OOB)
1421		return -EOPNOTSUPP;
1422
1423	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1424		return -EOPNOTSUPP;
1425
1426	if (sk->sk_state == BT_CLOSED)
1427		return 0;
1428
1429	skb = skb_recv_datagram(sk, flags, noblock, &err);
1430	if (!skb)
1431		return err;
1432
1433	skblen = skb->len;
1434	copied = skb->len;
1435	if (len < copied) {
1436		msg->msg_flags |= MSG_TRUNC;
1437		copied = len;
1438	}
1439
1440	skb_reset_transport_header(skb);
1441	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1442
1443	switch (hci_pi(sk)->channel) {
1444	case HCI_CHANNEL_RAW:
1445		hci_sock_cmsg(sk, msg, skb);
1446		break;
1447	case HCI_CHANNEL_USER:
1448	case HCI_CHANNEL_MONITOR:
1449		sock_recv_timestamp(msg, sk, skb);
1450		break;
1451	default:
1452		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1453			sock_recv_timestamp(msg, sk, skb);
1454		break;
1455	}
1456
1457	skb_free_datagram(sk, skb);
1458
1459	if (flags & MSG_TRUNC)
1460		copied = skblen;
1461
1462	return err ? : copied;
1463}
1464
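/* Dispatch one management command: validate the header, mirror the
 * command to the monitor, then check handler table bounds, trust level
 * and index state before calling the registered handler.
 */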
1465static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1466			struct msghdr *msg, size_t msglen)
1467{
1468	void *buf;
1469	u8 *cp;
1470	struct mgmt_hdr *hdr;
1471	u16 opcode, index, len;
1472	struct hci_dev *hdev = NULL;
1473	const struct hci_mgmt_handler *handler;
1474	bool var_len, no_hdev;
1475	int err;
1476
1477	BT_DBG("got %zu bytes", msglen);
1478
1479	if (msglen < sizeof(*hdr))
1480		return -EINVAL;
1481
1482	buf = kmalloc(msglen, GFP_KERNEL);
1483	if (!buf)
1484		return -ENOMEM;
1485
1486	if (memcpy_from_msg(buf, msg, msglen)) {
1487		err = -EFAULT;
1488		goto done;
1489	}
1490
1491	hdr = buf;
1492	opcode = __le16_to_cpu(hdr->opcode);
1493	index = __le16_to_cpu(hdr->index);
1494	len = __le16_to_cpu(hdr->len);
1495
1496	if (len != msglen - sizeof(*hdr)) {
1497		err = -EINVAL;
1498		goto done;
1499	}
1500
1501	if (chan->channel == HCI_CHANNEL_CONTROL) {
1502		struct sk_buff *skb;
1503
1504		/* Send event to monitor */
1505		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1506						  buf + sizeof(*hdr));
1507		if (skb) {
1508			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1509					    HCI_SOCK_TRUSTED, NULL);
1510			kfree_skb(skb);
1511		}
1512	}
1513
1514	if (opcode >= chan->handler_count ||
1515	    chan->handlers[opcode].func == NULL) {
1516		BT_DBG("Unknown op %u", opcode);
1517		err = mgmt_cmd_status(sk, index, opcode,
1518				      MGMT_STATUS_UNKNOWN_COMMAND);
1519		goto done;
1520	}
1521
1522	handler = &chan->handlers[opcode];
1523
1524	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1525	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1526		err = mgmt_cmd_status(sk, index, opcode,
1527				      MGMT_STATUS_PERMISSION_DENIED);
1528		goto done;
1529	}
1530
1531	if (index != MGMT_INDEX_NONE) {
1532		hdev = hci_dev_get(index);
1533		if (!hdev) {
1534			err = mgmt_cmd_status(sk, index, opcode,
1535					      MGMT_STATUS_INVALID_INDEX);
1536			goto done;
1537		}
1538
1539		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1540		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1541		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1542			err = mgmt_cmd_status(sk, index, opcode,
1543					      MGMT_STATUS_INVALID_INDEX);
1544			goto done;
1545		}
1546
1547		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1548		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1549			err = mgmt_cmd_status(sk, index, opcode,
1550					      MGMT_STATUS_INVALID_INDEX);
1551			goto done;
1552		}
1553	}
1554
1555	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1556	if (no_hdev != !hdev) {
1557		err = mgmt_cmd_status(sk, index, opcode,
1558				      MGMT_STATUS_INVALID_INDEX);
1559		goto done;
1560	}
1561
1562	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1563	if ((var_len && len < handler->data_len) ||
1564	    (!var_len && len != handler->data_len)) {
1565		err = mgmt_cmd_status(sk, index, opcode,
1566				      MGMT_STATUS_INVALID_PARAMS);
1567		goto done;
1568	}
1569
1570	if (hdev && chan->hdev_init)
1571		chan->hdev_init(sk, hdev);
1572
1573	cp = buf + sizeof(*hdr);
1574
1575	err = handler->func(sk, hdev, cp, len);
1576	if (err < 0)
1577		goto done;
1578
1579	err = msglen;
1580
1581done:
1582	if (hdev)
1583		hci_dev_put(hdev);
1584
1585	kfree(buf);
1586	return err;
1587}
1588
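/* Validate a user logging frame (priority 0-7, ident length and NUL
 * terminated strings) and forward it to the monitor channel as an
 * HCI_MON_USER_LOGGING message.
 */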
1589static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1590{
1591	struct hci_mon_hdr *hdr;
1592	struct sk_buff *skb;
1593	struct hci_dev *hdev;
1594	u16 index;
1595	int err;
1596
1597	/* The logging frame consists at minimum of the standard header,
1598	 * the priority byte, the ident length byte and at least one string
 1599	 * terminator NUL byte. Anything shorter is an invalid packet.
1600	 */
1601	if (len < sizeof(*hdr) + 3)
1602		return -EINVAL;
1603
1604	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1605	if (!skb)
1606		return err;
1607
1608	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1609		err = -EFAULT;
1610		goto drop;
1611	}
1612
1613	hdr = (void *)skb->data;
1614
1615	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1616		err = -EINVAL;
1617		goto drop;
1618	}
1619
1620	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1621		__u8 priority = skb->data[sizeof(*hdr)];
1622		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1623
1624		/* Only the priorities 0-7 are valid and with that any other
1625		 * value results in an invalid packet.
1626		 *
1627		 * The priority byte is followed by an ident length byte and
1628		 * the NUL terminated ident string. Check that the ident
1629		 * length is not overflowing the packet and also that the
1630		 * ident string itself is NUL terminated. In case the ident
1631		 * length is zero, the length value actually doubles as NUL
1632		 * terminator identifier.
1633		 *
1634		 * The message follows the ident string (if present) and
1635		 * must be NUL terminated. Otherwise it is not a valid packet.
1636		 */
1637		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1638		    ident_len > len - sizeof(*hdr) - 3 ||
1639		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1640			err = -EINVAL;
1641			goto drop;
1642		}
1643	} else {
1644		err = -EINVAL;
1645		goto drop;
1646	}
1647
1648	index = __le16_to_cpu(hdr->index);
1649
1650	if (index != MGMT_INDEX_NONE) {
1651		hdev = hci_dev_get(index);
1652		if (!hdev) {
1653			err = -ENODEV;
1654			goto drop;
1655		}
1656	} else {
1657		hdev = NULL;
1658	}
1659
1660	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1661
1662	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1663	err = len;
1664
1665	if (hdev)
1666		hci_dev_put(hdev);
1667
1668drop:
1669	kfree_skb(skb);
1670	return err;
1671}
1672
1673static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1674			    size_t len)
1675{
1676	struct sock *sk = sock->sk;
1677	struct hci_mgmt_chan *chan;
1678	struct hci_dev *hdev;
1679	struct sk_buff *skb;
1680	int err;
1681
1682	BT_DBG("sock %p sk %p", sock, sk);
1683
1684	if (msg->msg_flags & MSG_OOB)
1685		return -EOPNOTSUPP;
1686
1687	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1688			       MSG_CMSG_COMPAT))
1689		return -EINVAL;
1690
1691	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1692		return -EINVAL;
1693
1694	lock_sock(sk);
1695
1696	switch (hci_pi(sk)->channel) {
1697	case HCI_CHANNEL_RAW:
1698	case HCI_CHANNEL_USER:
1699		break;
1700	case HCI_CHANNEL_MONITOR:
1701		err = -EOPNOTSUPP;
1702		goto done;
1703	case HCI_CHANNEL_LOGGING:
1704		err = hci_logging_frame(sk, msg, len);
1705		goto done;
1706	default:
1707		mutex_lock(&mgmt_chan_list_lock);
1708		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1709		if (chan)
1710			err = hci_mgmt_cmd(chan, sk, msg, len);
1711		else
1712			err = -EINVAL;
1713
1714		mutex_unlock(&mgmt_chan_list_lock);
1715		goto done;
1716	}
1717
1718	hdev = hci_pi(sk)->hdev;
1719	if (!hdev) {
1720		err = -EBADFD;
1721		goto done;
1722	}
1723
1724	if (!test_bit(HCI_UP, &hdev->flags)) {
1725		err = -ENETDOWN;
1726		goto done;
1727	}
1728
1729	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1730	if (!skb)
1731		goto done;
1732
1733	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1734		err = -EFAULT;
1735		goto drop;
1736	}
1737
1738	hci_skb_pkt_type(skb) = skb->data[0];
1739	skb_pull(skb, 1);
1740
1741	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1742		/* No permission check is needed for user channel
1743		 * since that gets enforced when binding the socket.
1744		 *
1745		 * However check that the packet type is valid.
1746		 */
1747		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1748		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1749		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1750			err = -EINVAL;
1751			goto drop;
1752		}
1753
1754		skb_queue_tail(&hdev->raw_q, skb);
1755		queue_work(hdev->workqueue, &hdev->tx_work);
1756	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1757		u16 opcode = get_unaligned_le16(skb->data);
1758		u16 ogf = hci_opcode_ogf(opcode);
1759		u16 ocf = hci_opcode_ocf(opcode);
1760
1761		if (((ogf > HCI_SFLT_MAX_OGF) ||
1762		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1763				   &hci_sec_filter.ocf_mask[ogf])) &&
1764		    !capable(CAP_NET_RAW)) {
1765			err = -EPERM;
1766			goto drop;
1767		}
1768
1769		/* Since the opcode has already been extracted here, store
1770		 * a copy of the value for later use by the drivers.
1771		 */
1772		hci_skb_opcode(skb) = opcode;
1773
1774		if (ogf == 0x3f) {
1775			skb_queue_tail(&hdev->raw_q, skb);
1776			queue_work(hdev->workqueue, &hdev->tx_work);
1777		} else {
1778			/* Stand-alone HCI commands must be flagged as
1779			 * single-command requests.
1780			 */
1781			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1782
1783			skb_queue_tail(&hdev->cmd_q, skb);
1784			queue_work(hdev->workqueue, &hdev->cmd_work);
1785		}
1786	} else {
1787		if (!capable(CAP_NET_RAW)) {
1788			err = -EPERM;
1789			goto drop;
1790		}
1791
1792		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1793		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1794			err = -EINVAL;
1795			goto drop;
1796		}
1797
1798		skb_queue_tail(&hdev->raw_q, skb);
1799		queue_work(hdev->workqueue, &hdev->tx_work);
1800	}
1801
1802	err = len;
1803
1804done:
1805	release_sock(sk);
1806	return err;
1807
1808drop:
1809	kfree_skb(skb);
1810	goto done;
1811}
1812
1813static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1814			       char __user *optval, unsigned int len)
1815{
1816	struct hci_ufilter uf = { .opcode = 0 };
1817	struct sock *sk = sock->sk;
1818	int err = 0, opt = 0;
1819
1820	BT_DBG("sk %p, opt %d", sk, optname);
1821
1822	if (level != SOL_HCI)
1823		return -ENOPROTOOPT;
1824
1825	lock_sock(sk);
1826
1827	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1828		err = -EBADFD;
1829		goto done;
1830	}
1831
1832	switch (optname) {
1833	case HCI_DATA_DIR:
1834		if (get_user(opt, (int __user *)optval)) {
1835			err = -EFAULT;
1836			break;
1837		}
1838
1839		if (opt)
1840			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1841		else
1842			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1843		break;
1844
1845	case HCI_TIME_STAMP:
1846		if (get_user(opt, (int __user *)optval)) {
1847			err = -EFAULT;
1848			break;
1849		}
1850
1851		if (opt)
1852			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1853		else
1854			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1855		break;
1856
1857	case HCI_FILTER:
1858		{
1859			struct hci_filter *f = &hci_pi(sk)->filter;
1860
1861			uf.type_mask = f->type_mask;
1862			uf.opcode    = f->opcode;
1863			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1864			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1865		}
1866
1867		len = min_t(unsigned int, len, sizeof(uf));
1868		if (copy_from_user(&uf, optval, len)) {
1869			err = -EFAULT;
1870			break;
1871		}
1872
1873		if (!capable(CAP_NET_RAW)) {
1874			uf.type_mask &= hci_sec_filter.type_mask;
1875			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1876			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1877		}
1878
1879		{
1880			struct hci_filter *f = &hci_pi(sk)->filter;
1881
1882			f->type_mask = uf.type_mask;
1883			f->opcode    = uf.opcode;
1884			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1885			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1886		}
1887		break;
1888
1889	default:
1890		err = -ENOPROTOOPT;
1891		break;
1892	}
1893
1894done:
1895	release_sock(sk);
1896	return err;
1897}
1898
1899static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1900			       char __user *optval, int __user *optlen)
1901{
1902	struct hci_ufilter uf;
1903	struct sock *sk = sock->sk;
1904	int len, opt, err = 0;
1905
1906	BT_DBG("sk %p, opt %d", sk, optname);
1907
1908	if (level != SOL_HCI)
1909		return -ENOPROTOOPT;
1910
1911	if (get_user(len, optlen))
1912		return -EFAULT;
1913
1914	lock_sock(sk);
1915
1916	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1917		err = -EBADFD;
1918		goto done;
1919	}
1920
1921	switch (optname) {
1922	case HCI_DATA_DIR:
1923		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1924			opt = 1;
1925		else
1926			opt = 0;
1927
1928		if (put_user(opt, optval))
1929			err = -EFAULT;
1930		break;
1931
1932	case HCI_TIME_STAMP:
1933		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1934			opt = 1;
1935		else
1936			opt = 0;
1937
1938		if (put_user(opt, optval))
1939			err = -EFAULT;
1940		break;
1941
1942	case HCI_FILTER:
1943		{
1944			struct hci_filter *f = &hci_pi(sk)->filter;
1945
1946			memset(&uf, 0, sizeof(uf));
1947			uf.type_mask = f->type_mask;
1948			uf.opcode    = f->opcode;
1949			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1950			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1951		}
1952
1953		len = min_t(unsigned int, len, sizeof(uf));
1954		if (copy_to_user(optval, &uf, len))
1955			err = -EFAULT;
1956		break;
1957
1958	default:
1959		err = -ENOPROTOOPT;
1960		break;
1961	}
1962
1963done:
1964	release_sock(sk);
1965	return err;
1966}
1967
1968static const struct proto_ops hci_sock_ops = {
1969	.family		= PF_BLUETOOTH,
1970	.owner		= THIS_MODULE,
1971	.release	= hci_sock_release,
1972	.bind		= hci_sock_bind,
1973	.getname	= hci_sock_getname,
1974	.sendmsg	= hci_sock_sendmsg,
1975	.recvmsg	= hci_sock_recvmsg,
1976	.ioctl		= hci_sock_ioctl,
1977	.poll		= datagram_poll,
1978	.listen		= sock_no_listen,
1979	.shutdown	= sock_no_shutdown,
1980	.setsockopt	= hci_sock_setsockopt,
1981	.getsockopt	= hci_sock_getsockopt,
1982	.connect	= sock_no_connect,
1983	.socketpair	= sock_no_socketpair,
1984	.accept		= sock_no_accept,
1985	.mmap		= sock_no_mmap
1986};
1987
1988static struct proto hci_sk_proto = {
1989	.name		= "HCI",
1990	.owner		= THIS_MODULE,
1991	.obj_size	= sizeof(struct hci_pinfo)
1992};
1993
1994static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1995			   int kern)
1996{
1997	struct sock *sk;
1998
1999	BT_DBG("sock %p", sock);
2000
2001	if (sock->type != SOCK_RAW)
2002		return -ESOCKTNOSUPPORT;
2003
2004	sock->ops = &hci_sock_ops;
2005
2006	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2007	if (!sk)
2008		return -ENOMEM;
2009
2010	sock_init_data(sock, sk);
2011
2012	sock_reset_flag(sk, SOCK_ZAPPED);
2013
2014	sk->sk_protocol = protocol;
2015
2016	sock->state = SS_UNCONNECTED;
2017	sk->sk_state = BT_OPEN;
2018
2019	bt_sock_link(&hci_sk_list, sk);
2020	return 0;
2021}
2022
2023static const struct net_proto_family hci_sock_family_ops = {
2024	.family	= PF_BLUETOOTH,
2025	.owner	= THIS_MODULE,
2026	.create	= hci_sock_create,
2027};
2028
2029int __init hci_sock_init(void)
2030{
2031	int err;
2032
2033	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2034
2035	err = proto_register(&hci_sk_proto, 0);
2036	if (err < 0)
2037		return err;
2038
2039	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2040	if (err < 0) {
2041		BT_ERR("HCI socket registration failed");
2042		goto error;
2043	}
2044
2045	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2046	if (err < 0) {
2047		BT_ERR("Failed to create HCI proc file");
2048		bt_sock_unregister(BTPROTO_HCI);
2049		goto error;
2050	}
2051
2052	BT_INFO("HCI socket layer initialized");
2053
2054	return 0;
2055
2056error:
2057	proto_unregister(&hci_sk_proto);
2058	return err;
2059}
2060
2061void hci_sock_cleanup(void)
2062{
2063	bt_procfs_cleanup(&init_net, "hci");
2064	bt_sock_unregister(BTPROTO_HCI);
2065	proto_unregister(&hci_sk_proto);
2066}
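
/* For reference, a minimal userspace sketch (not part of this file) of
 * binding a raw HCI socket to the first controller. It only uses the
 * address family and channel constants handled above; the BlueZ
 * userspace headers and the presence of hci0 are assumptions, and all
 * error handling is omitted:
 *
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,                // hci0, assumed present
 *		.hci_channel = HCI_CHANNEL_RAW,  // see hci_sock_bind()
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */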
v4.10.11
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u32             cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60};
  61
  62void hci_sock_set_flag(struct sock *sk, int nr)
  63{
  64	set_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67void hci_sock_clear_flag(struct sock *sk, int nr)
  68{
  69	clear_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72int hci_sock_test_flag(struct sock *sk, int nr)
  73{
  74	return test_bit(nr, &hci_pi(sk)->flags);
  75}
  76
  77unsigned short hci_sock_get_channel(struct sock *sk)
  78{
  79	return hci_pi(sk)->channel;
  80}
  81
  82u32 hci_sock_get_cookie(struct sock *sk)
  83{
  84	return hci_pi(sk)->cookie;
  85}
  86
  87static bool hci_sock_gen_cookie(struct sock *sk)
  88{
  89	int id = hci_pi(sk)->cookie;
  90
  91	if (!id) {
  92		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
  93		if (id < 0)
  94			id = 0xffffffff;
  95
  96		hci_pi(sk)->cookie = id;
  97		get_task_comm(hci_pi(sk)->comm, current);
  98		return true;
  99	}
 100
 101	return false;
 102}
 103
 104static void hci_sock_free_cookie(struct sock *sk)
 105{
 106	int id = hci_pi(sk)->cookie;
 107
 108	if (id) {
 109		hci_pi(sk)->cookie = 0xffffffff;
 110		ida_simple_remove(&sock_cookie_ida, id);
 111	}
 112}
 113
 114static inline int hci_test_bit(int nr, const void *addr)
 115{
 116	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 117}
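/* Worked example of the bit math above: hci_test_bit() treats addr
 * as an array of 32-bit words, with nr >> 5 selecting the word and
 * nr & 31 the bit within it. For nr = 70 the test reads word 2
 * (70 >> 5 == 2), bit 6 (70 & 31 == 6), i.e. word[2] & (1 << 6).
 * The event and OCF masks below are indexed in exactly this way.
 */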
 118
 119/* Security filter */
 120#define HCI_SFLT_MAX_OGF  5
 121
 122struct hci_sec_filter {
 123	__u32 type_mask;
 124	__u32 event_mask[2];
 125	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 126};
 127
 128static const struct hci_sec_filter hci_sec_filter = {
 129	/* Packet types */
 130	0x10,
 131	/* Events */
 132	{ 0x1000d9fe, 0x0000b00c },
 133	/* Commands */
 134	{
 135		{ 0x0 },
 136		/* OGF_LINK_CTL */
 137		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 138		/* OGF_LINK_POLICY */
 139		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 140		/* OGF_HOST_CTL */
 141		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 142		/* OGF_INFO_PARAM */
 143		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 144		/* OGF_STATUS_PARAM */
 145		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 146	}
 147};
 148
 149static struct bt_sock_list hci_sk_list = {
 150	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 151};
 152
 153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 154{
 155	struct hci_filter *flt;
 156	int flt_type, flt_event;
 157
 158	/* Apply filter */
 159	flt = &hci_pi(sk)->filter;
 160
 161	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 162
 163	if (!test_bit(flt_type, &flt->type_mask))
 164		return true;
 165
 166	/* Extra filter for event packets only */
 167	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 168		return false;
 169
 170	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 171
 172	if (!hci_test_bit(flt_event, &flt->event_mask))
 173		return true;
 174
 175	/* Check filter only when opcode is set */
 176	if (!flt->opcode)
 177		return false;
 178
 179	if (flt_event == HCI_EV_CMD_COMPLETE &&
 180	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 181		return true;
 182
 183	if (flt_event == HCI_EV_CMD_STATUS &&
 184	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 185		return true;
 186
 187	return false;
 188}
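/* Worked example of the filter logic above: assume a socket whose
 * filter has HCI_EVENT_PKT set in type_mask, HCI_EV_CMD_COMPLETE
 * set in event_mask and opcode set to an opcode of interest. A
 * Command Complete event carrying that opcode passes all three
 * checks and is delivered; one carrying any other opcode fails the
 * final comparison and is dropped. The offsets 3 and 4 skip the
 * two-byte event header and the preceding parameters (the ncmd
 * byte, plus the status byte for Command Status) to reach the
 * opcode field.
 */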
 189
 190/* Send frame to RAW socket */
 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 192{
 193	struct sock *sk;
 194	struct sk_buff *skb_copy = NULL;
 195
 196	BT_DBG("hdev %p len %d", hdev, skb->len);
 197
 198	read_lock(&hci_sk_list.lock);
 199
 200	sk_for_each(sk, &hci_sk_list.head) {
 201		struct sk_buff *nskb;
 202
 203		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 204			continue;
 205
 206		/* Don't send frame to the socket it came from */
 207		if (skb->sk == sk)
 208			continue;
 209
 210		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 211			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 212			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 213			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 214			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 215				continue;
 216			if (is_filtered_packet(sk, skb))
 217				continue;
 218		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 219			if (!bt_cb(skb)->incoming)
 220				continue;
 221			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 222			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 224				continue;
 225		} else {
 226			/* Don't send frame to other channel types */
 227			continue;
 228		}
 229
 230		if (!skb_copy) {
 231			/* Create a private copy with headroom */
 232			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 233			if (!skb_copy)
 234				continue;
 235
 236			/* Put type byte before the data */
 237			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 238		}
 239
 240		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 241		if (!nskb)
 242			continue;
 243
 244		if (sock_queue_rcv_skb(sk, nskb))
 245			kfree_skb(nskb);
 246	}
 247
 248	read_unlock(&hci_sk_list.lock);
 249
 250	kfree_skb(skb_copy);
 251}
 252
 253/* Send frame to sockets with specific channel */
 254void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 255			 int flag, struct sock *skip_sk)
 256{
 257	struct sock *sk;
 258
 259	BT_DBG("channel %u len %d", channel, skb->len);
 260
 261	read_lock(&hci_sk_list.lock);
 262
 263	sk_for_each(sk, &hci_sk_list.head) {
 264		struct sk_buff *nskb;
 265
 266		/* Ignore socket without the flag set */
 267		if (!hci_sock_test_flag(sk, flag))
 268			continue;
 269
 270		/* Skip the original socket */
 271		if (sk == skip_sk)
 272			continue;
 273
 274		if (sk->sk_state != BT_BOUND)
 275			continue;
 276
 277		if (hci_pi(sk)->channel != channel)
 278			continue;
 279
 280		nskb = skb_clone(skb, GFP_ATOMIC);
 281		if (!nskb)
 282			continue;
 283
 284		if (sock_queue_rcv_skb(sk, nskb))
 285			kfree_skb(nskb);
 286	}
 287
 288	read_unlock(&hci_sk_list.lock);
 289}
 290
 291/* Send frame to monitor socket */
 292void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 293{
 294	struct sk_buff *skb_copy = NULL;
 295	struct hci_mon_hdr *hdr;
 296	__le16 opcode;
 297
 298	if (!atomic_read(&monitor_promisc))
 299		return;
 300
 301	BT_DBG("hdev %p len %d", hdev, skb->len);
 302
 303	switch (hci_skb_pkt_type(skb)) {
 304	case HCI_COMMAND_PKT:
 305		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 306		break;
 307	case HCI_EVENT_PKT:
 308		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 309		break;
 310	case HCI_ACLDATA_PKT:
 311		if (bt_cb(skb)->incoming)
 312			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 313		else
 314			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 315		break;
 316	case HCI_SCODATA_PKT:
 317		if (bt_cb(skb)->incoming)
 318			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 319		else
 320			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 321		break;
 322	case HCI_DIAG_PKT:
 323		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 324		break;
 325	default:
 326		return;
 327	}
 328
 329	/* Create a private copy with headroom */
 330	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 331	if (!skb_copy)
 332		return;
 333
 334	/* Put header before the data */
 335	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 336	hdr->opcode = opcode;
 337	hdr->index = cpu_to_le16(hdev->id);
 338	hdr->len = cpu_to_le16(skb->len);
 339
 340	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 341			    HCI_SOCK_TRUSTED, NULL);
 342	kfree_skb(skb_copy);
 343}
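/* A monitor frame is the 6-byte struct hci_mon_hdr (opcode, index
 * and len, each little endian) followed by the unmodified HCI
 * packet. A hypothetical reader on a bound HCI_CHANNEL_MONITOR
 * socket (illustration only; one recv() returns one whole frame):
 *
 *	unsigned char buf[HCI_MON_HDR_SIZE + HCI_MAX_FRAME_SIZE];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct hci_mon_hdr *hdr = (void *)buf;
 *
 * The payload starts at buf + HCI_MON_HDR_SIZE and is
 * le16_to_cpu(hdr->len) bytes long.
 */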
 344
 345void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 346				 void *data, u16 data_len, ktime_t tstamp,
 347				 int flag, struct sock *skip_sk)
 348{
 349	struct sock *sk;
 350	__le16 index;
 351
 352	if (hdev)
 353		index = cpu_to_le16(hdev->id);
 354	else
 355		index = cpu_to_le16(MGMT_INDEX_NONE);
 356
 357	read_lock(&hci_sk_list.lock);
 358
 359	sk_for_each(sk, &hci_sk_list.head) {
 360		struct hci_mon_hdr *hdr;
 361		struct sk_buff *skb;
 362
 363		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 364			continue;
 365
 366		/* Ignore socket without the flag set */
 367		if (!hci_sock_test_flag(sk, flag))
 368			continue;
 369
 370		/* Skip the original socket */
 371		if (sk == skip_sk)
 372			continue;
 373
 374		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 375		if (!skb)
 376			continue;
 377
 378		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 379		put_unaligned_le16(event, skb_put(skb, 2));
 380
 381		if (data)
 382			memcpy(skb_put(skb, data_len), data, data_len);
 383
 384		skb->tstamp = tstamp;
 385
 386		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 387		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 388		hdr->index = index;
 389		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 390
 391		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 392				    HCI_SOCK_TRUSTED, NULL);
 393		kfree_skb(skb);
 394	}
 395
 396	read_unlock(&hci_sk_list.lock);
 397}
 398
 399static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 400{
 401	struct hci_mon_hdr *hdr;
 402	struct hci_mon_new_index *ni;
 403	struct hci_mon_index_info *ii;
 404	struct sk_buff *skb;
 405	__le16 opcode;
 406
 407	switch (event) {
 408	case HCI_DEV_REG:
 409		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 410		if (!skb)
 411			return NULL;
 412
 413		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 414		ni->type = hdev->dev_type;
 415		ni->bus = hdev->bus;
 416		bacpy(&ni->bdaddr, &hdev->bdaddr);
 417		memcpy(ni->name, hdev->name, 8);
 418
 419		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 420		break;
 421
 422	case HCI_DEV_UNREG:
 423		skb = bt_skb_alloc(0, GFP_ATOMIC);
 424		if (!skb)
 425			return NULL;
 426
 427		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 428		break;
 429
 430	case HCI_DEV_SETUP:
 431		if (hdev->manufacturer == 0xffff)
 432			return NULL;
 433
 434		/* fall through */
 435
 436	case HCI_DEV_UP:
 437		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 438		if (!skb)
 439			return NULL;
 440
 441		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 442		bacpy(&ii->bdaddr, &hdev->bdaddr);
 443		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 444
 445		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 446		break;
 447
 448	case HCI_DEV_OPEN:
 449		skb = bt_skb_alloc(0, GFP_ATOMIC);
 450		if (!skb)
 451			return NULL;
 452
 453		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 454		break;
 455
 456	case HCI_DEV_CLOSE:
 457		skb = bt_skb_alloc(0, GFP_ATOMIC);
 458		if (!skb)
 459			return NULL;
 460
 461		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 462		break;
 463
 464	default:
 465		return NULL;
 466	}
 467
 468	__net_timestamp(skb);
 469
 470	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 471	hdr->opcode = opcode;
 472	hdr->index = cpu_to_le16(hdev->id);
 473	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 474
 475	return skb;
 476}
 477
 478static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 479{
 480	struct hci_mon_hdr *hdr;
 481	struct sk_buff *skb;
 482	u16 format;
 483	u8 ver[3];
 484	u32 flags;
 485
 486	/* No message needed when cookie is not present */
 487	if (!hci_pi(sk)->cookie)
 488		return NULL;
 489
 490	switch (hci_pi(sk)->channel) {
 491	case HCI_CHANNEL_RAW:
 492		format = 0x0000;
 493		ver[0] = BT_SUBSYS_VERSION;
 494		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 495		break;
 496	case HCI_CHANNEL_USER:
 497		format = 0x0001;
 498		ver[0] = BT_SUBSYS_VERSION;
 499		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 500		break;
 501	case HCI_CHANNEL_CONTROL:
 502		format = 0x0002;
 503		mgmt_fill_version_info(ver);
 504		break;
 505	default:
 506		/* No message for unsupported format */
 507		return NULL;
 508	}
 509
 510	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 511	if (!skb)
 512		return NULL;
 513
 514	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 515
 516	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 517	put_unaligned_le16(format, skb_put(skb, 2));
 518	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
 519	put_unaligned_le32(flags, skb_put(skb, 4));
 520	*skb_put(skb, 1) = TASK_COMM_LEN;
 521	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);
 522
 523	__net_timestamp(skb);
 524
 525	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 526	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 527	if (hci_pi(sk)->hdev)
 528		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 529	else
 530		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 531	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 532
 533	return skb;
 534}
 535
 536static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 537{
 538	struct hci_mon_hdr *hdr;
 539	struct sk_buff *skb;
 540
 541	/* No message needed when cookie is not present */
 542	if (!hci_pi(sk)->cookie)
 543		return NULL;
 544
 545	switch (hci_pi(sk)->channel) {
 546	case HCI_CHANNEL_RAW:
 547	case HCI_CHANNEL_USER:
 548	case HCI_CHANNEL_CONTROL:
 549		break;
 550	default:
 551		/* No message for unsupported format */
 552		return NULL;
 553	}
 554
 555	skb = bt_skb_alloc(4, GFP_ATOMIC);
 556	if (!skb)
 557		return NULL;
 558
 559	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 560
 561	__net_timestamp(skb);
 562
 563	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 564	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 565	if (hci_pi(sk)->hdev)
 566		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 567	else
 568		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 569	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 570
 571	return skb;
 572}
 573
 574static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 575						   u16 opcode, u16 len,
 576						   const void *buf)
 577{
 578	struct hci_mon_hdr *hdr;
 579	struct sk_buff *skb;
 580
 581	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 582	if (!skb)
 583		return NULL;
 584
 585	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 586	put_unaligned_le16(opcode, skb_put(skb, 2));
 587
 588	if (buf)
 589		memcpy(skb_put(skb, len), buf, len);
 590
 591	__net_timestamp(skb);
 592
 593	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 594	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 595	hdr->index = cpu_to_le16(index);
 596	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 597
 598	return skb;
 599}
 600
 601static void __printf(2, 3)
 602send_monitor_note(struct sock *sk, const char *fmt, ...)
 603{
 604	size_t len;
 605	struct hci_mon_hdr *hdr;
 606	struct sk_buff *skb;
 607	va_list args;
 608
 609	va_start(args, fmt);
 610	len = vsnprintf(NULL, 0, fmt, args);
 611	va_end(args);
 612
 613	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 614	if (!skb)
 615		return;
 616
 617	va_start(args, fmt);
 618	vsprintf(skb_put(skb, len), fmt, args);
 619	*skb_put(skb, 1) = 0;
 620	va_end(args);
 621
 622	__net_timestamp(skb);
 623
 624	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 625	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 626	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 627	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 628
 629	if (sock_queue_rcv_skb(sk, skb))
 630		kfree_skb(skb);
 631}
 632
 633static void send_monitor_replay(struct sock *sk)
 634{
 635	struct hci_dev *hdev;
 636
 637	read_lock(&hci_dev_list_lock);
 638
 639	list_for_each_entry(hdev, &hci_dev_list, list) {
 640		struct sk_buff *skb;
 641
 642		skb = create_monitor_event(hdev, HCI_DEV_REG);
 643		if (!skb)
 644			continue;
 645
 646		if (sock_queue_rcv_skb(sk, skb))
 647			kfree_skb(skb);
 648
 649		if (!test_bit(HCI_RUNNING, &hdev->flags))
 650			continue;
 651
 652		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 653		if (!skb)
 654			continue;
 655
 656		if (sock_queue_rcv_skb(sk, skb))
 657			kfree_skb(skb);
 658
 659		if (test_bit(HCI_UP, &hdev->flags))
 660			skb = create_monitor_event(hdev, HCI_DEV_UP);
 661		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 662			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 663		else
 664			skb = NULL;
 665
 666		if (skb) {
 667			if (sock_queue_rcv_skb(sk, skb))
 668				kfree_skb(skb);
 669		}
 670	}
 671
 672	read_unlock(&hci_dev_list_lock);
 673}
 674
 675static void send_monitor_control_replay(struct sock *mon_sk)
 676{
 677	struct sock *sk;
 678
 679	read_lock(&hci_sk_list.lock);
 680
 681	sk_for_each(sk, &hci_sk_list.head) {
 682		struct sk_buff *skb;
 683
 684		skb = create_monitor_ctrl_open(sk);
 685		if (!skb)
 686			continue;
 687
 688		if (sock_queue_rcv_skb(mon_sk, skb))
 689			kfree_skb(skb);
 690	}
 691
 692	read_unlock(&hci_sk_list.lock);
 693}
 694
 695/* Generate internal stack event */
 696static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 697{
 698	struct hci_event_hdr *hdr;
 699	struct hci_ev_stack_internal *ev;
 700	struct sk_buff *skb;
 701
 702	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 703	if (!skb)
 704		return;
 705
 706	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 707	hdr->evt  = HCI_EV_STACK_INTERNAL;
 708	hdr->plen = sizeof(*ev) + dlen;
 709
 710	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 711	ev->type = type;
 712	memcpy(ev->data, data, dlen);
 713
 714	bt_cb(skb)->incoming = 1;
 715	__net_timestamp(skb);
 716
 717	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 718	hci_send_to_sock(hdev, skb);
 719	kfree_skb(skb);
 720}
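/* For illustration, the frame built above arrives on raw sockets
 * laid out as follows (the leading type byte is prepended by
 * hci_send_to_sock):
 *
 *	0x04			HCI_EVENT_PKT
 *	0xfd			HCI_EV_STACK_INTERNAL
 *	plen			sizeof(*ev) + dlen
 *	type, data[]		struct hci_ev_stack_internal
 *
 * so userspace sees a regular HCI event with a reserved event code.
 */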
 721
 722void hci_sock_dev_event(struct hci_dev *hdev, int event)
 723{
 724	BT_DBG("hdev %s event %d", hdev->name, event);
 725
 726	if (atomic_read(&monitor_promisc)) {
 727		struct sk_buff *skb;
 728
 729		/* Send event to monitor */
 730		skb = create_monitor_event(hdev, event);
 731		if (skb) {
 732			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 733					    HCI_SOCK_TRUSTED, NULL);
 734			kfree_skb(skb);
 735		}
 736	}
 737
 738	if (event <= HCI_DEV_DOWN) {
 739		struct hci_ev_si_device ev;
 740
 741		/* Send event to sockets */
 742		ev.event  = event;
 743		ev.dev_id = hdev->id;
 744		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 745	}
 746
 747	if (event == HCI_DEV_UNREG) {
 748		struct sock *sk;
 749
 750		/* Detach sockets from device */
 751		read_lock(&hci_sk_list.lock);
 752		sk_for_each(sk, &hci_sk_list.head) {
 753			bh_lock_sock_nested(sk);
 754			if (hci_pi(sk)->hdev == hdev) {
 755				hci_pi(sk)->hdev = NULL;
 756				sk->sk_err = EPIPE;
 757				sk->sk_state = BT_OPEN;
 758				sk->sk_state_change(sk);
 759
 760				hci_dev_put(hdev);
 761			}
 762			bh_unlock_sock(sk);
 763		}
 764		read_unlock(&hci_sk_list.lock);
 765	}
 766}
 767
 768static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 769{
 770	struct hci_mgmt_chan *c;
 771
 772	list_for_each_entry(c, &mgmt_chan_list, list) {
 773		if (c->channel == channel)
 774			return c;
 775	}
 776
 777	return NULL;
 778}
 779
 780static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 781{
 782	struct hci_mgmt_chan *c;
 783
 784	mutex_lock(&mgmt_chan_list_lock);
 785	c = __hci_mgmt_chan_find(channel);
 786	mutex_unlock(&mgmt_chan_list_lock);
 787
 788	return c;
 789}
 790
 791int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 792{
 793	if (c->channel < HCI_CHANNEL_CONTROL)
 794		return -EINVAL;
 795
 796	mutex_lock(&mgmt_chan_list_lock);
 797	if (__hci_mgmt_chan_find(c->channel)) {
 798		mutex_unlock(&mgmt_chan_list_lock);
 799		return -EALREADY;
 800	}
 801
 802	list_add_tail(&c->list, &mgmt_chan_list);
 803
 804	mutex_unlock(&mgmt_chan_list_lock);
 805
 806	return 0;
 807}
 808EXPORT_SYMBOL(hci_mgmt_chan_register);
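/* Minimal registration sketch, modelled on what mgmt.c does for
 * HCI_CHANNEL_CONTROL; the handler table below is a placeholder,
 * not a real one:
 *
 *	static const struct hci_mgmt_handler my_handlers[] = {
 *		// [opcode] = { func, data_len, flags }, ...
 *	};
 *
 *	static struct hci_mgmt_chan my_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(my_handlers),
 *		.handlers	= my_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&my_chan);
 *
 * Channels below HCI_CHANNEL_CONTROL are rejected with -EINVAL and
 * an already registered channel with -EALREADY, as coded above.
 */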
 809
 810void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 811{
 812	mutex_lock(&mgmt_chan_list_lock);
 813	list_del(&c->list);
 814	mutex_unlock(&mgmt_chan_list_lock);
 815}
 816EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 817
 818static int hci_sock_release(struct socket *sock)
 819{
 820	struct sock *sk = sock->sk;
 821	struct hci_dev *hdev;
 822	struct sk_buff *skb;
 823
 824	BT_DBG("sock %p sk %p", sock, sk);
 825
 826	if (!sk)
 827		return 0;
 828
 829	hdev = hci_pi(sk)->hdev;
 830
 831	switch (hci_pi(sk)->channel) {
 832	case HCI_CHANNEL_MONITOR:
 833		atomic_dec(&monitor_promisc);
 834		break;
 835	case HCI_CHANNEL_RAW:
 836	case HCI_CHANNEL_USER:
 837	case HCI_CHANNEL_CONTROL:
 838		/* Send event to monitor */
 839		skb = create_monitor_ctrl_close(sk);
 840		if (skb) {
 841			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 842					    HCI_SOCK_TRUSTED, NULL);
 843			kfree_skb(skb);
 844		}
 845
 846		hci_sock_free_cookie(sk);
 847		break;
 848	}
 849
 850	bt_sock_unlink(&hci_sk_list, sk);
 851
 852	if (hdev) {
 853		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 854			/* When releasing a user channel exclusive access,
 855			 * call hci_dev_do_close directly instead of calling
 856			 * hci_dev_close to ensure the exclusive access will
 857			 * be released and the controller brought back down.
 858			 *
 859			 * The checking of HCI_AUTO_OFF is not needed in this
 860			 * case since it will have been cleared already when
 861			 * opening the user channel.
 862			 */
 863			hci_dev_do_close(hdev);
 864			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 865			mgmt_index_added(hdev);
 866		}
 867
 868		atomic_dec(&hdev->promisc);
 869		hci_dev_put(hdev);
 870	}
 871
 872	sock_orphan(sk);
 873
 874	skb_queue_purge(&sk->sk_receive_queue);
 875	skb_queue_purge(&sk->sk_write_queue);
 876
 877	sock_put(sk);
 878	return 0;
 879}
 880
 881static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 882{
 883	bdaddr_t bdaddr;
 884	int err;
 885
 886	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 887		return -EFAULT;
 888
 889	hci_dev_lock(hdev);
 890
 891	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 892
 893	hci_dev_unlock(hdev);
 894
 895	return err;
 896}
 897
 898static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 899{
 900	bdaddr_t bdaddr;
 901	int err;
 902
 903	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 904		return -EFAULT;
 905
 906	hci_dev_lock(hdev);
 907
 908	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 909
 910	hci_dev_unlock(hdev);
 911
 912	return err;
 913}
 914
 915/* Ioctls that require a bound socket */
 916static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 917				unsigned long arg)
 918{
 919	struct hci_dev *hdev = hci_pi(sk)->hdev;
 920
 921	if (!hdev)
 922		return -EBADFD;
 923
 924	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 925		return -EBUSY;
 926
 927	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 928		return -EOPNOTSUPP;
 929
 930	if (hdev->dev_type != HCI_PRIMARY)
 931		return -EOPNOTSUPP;
 932
 933	switch (cmd) {
 934	case HCISETRAW:
 935		if (!capable(CAP_NET_ADMIN))
 936			return -EPERM;
 937		return -EOPNOTSUPP;
 938
 939	case HCIGETCONNINFO:
 940		return hci_get_conn_info(hdev, (void __user *)arg);
 941
 942	case HCIGETAUTHINFO:
 943		return hci_get_auth_info(hdev, (void __user *)arg);
 944
 945	case HCIBLOCKADDR:
 946		if (!capable(CAP_NET_ADMIN))
 947			return -EPERM;
 948		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 949
 950	case HCIUNBLOCKADDR:
 951		if (!capable(CAP_NET_ADMIN))
 952			return -EPERM;
 953		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 954	}
 955
 956	return -ENOIOCTLCMD;
 957}
 958
 959static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 960			  unsigned long arg)
 961{
 962	void __user *argp = (void __user *)arg;
 963	struct sock *sk = sock->sk;
 964	int err;
 965
 966	BT_DBG("cmd %x arg %lx", cmd, arg);
 967
 968	lock_sock(sk);
 969
 970	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 971		err = -EBADFD;
 972		goto done;
 973	}
 974
 975	/* When calling an ioctl on an unbound raw socket, ensure that
 976	 * the monitor gets informed. Ensure that the resulting event is
 977	 * only sent once by checking if the cookie exists or not. The
 978	 * socket cookie will only ever be generated once for the
 979	 * lifetime of a given socket.
 980	 */
 981	if (hci_sock_gen_cookie(sk)) {
 982		struct sk_buff *skb;
 983
 984		if (capable(CAP_NET_ADMIN))
 985			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 986
 987		/* Send event to monitor */
 988		skb = create_monitor_ctrl_open(sk);
 989		if (skb) {
 990			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 991					    HCI_SOCK_TRUSTED, NULL);
 992			kfree_skb(skb);
 993		}
 994	}
 995
 996	release_sock(sk);
 997
 998	switch (cmd) {
 999	case HCIGETDEVLIST:
1000		return hci_get_dev_list(argp);
1001
1002	case HCIGETDEVINFO:
1003		return hci_get_dev_info(argp);
1004
1005	case HCIGETCONNLIST:
1006		return hci_get_conn_list(argp);
1007
1008	case HCIDEVUP:
1009		if (!capable(CAP_NET_ADMIN))
1010			return -EPERM;
1011		return hci_dev_open(arg);
1012
1013	case HCIDEVDOWN:
1014		if (!capable(CAP_NET_ADMIN))
1015			return -EPERM;
1016		return hci_dev_close(arg);
1017
1018	case HCIDEVRESET:
1019		if (!capable(CAP_NET_ADMIN))
1020			return -EPERM;
1021		return hci_dev_reset(arg);
1022
1023	case HCIDEVRESTAT:
1024		if (!capable(CAP_NET_ADMIN))
1025			return -EPERM;
1026		return hci_dev_reset_stat(arg);
1027
1028	case HCISETSCAN:
1029	case HCISETAUTH:
1030	case HCISETENCRYPT:
1031	case HCISETPTYPE:
1032	case HCISETLINKPOL:
1033	case HCISETLINKMODE:
1034	case HCISETACLMTU:
1035	case HCISETSCOMTU:
1036		if (!capable(CAP_NET_ADMIN))
1037			return -EPERM;
1038		return hci_dev_cmd(cmd, argp);
1039
1040	case HCIINQUIRY:
1041		return hci_inquiry(argp);
1042	}
1043
1044	lock_sock(sk);
1045
1046	err = hci_sock_bound_ioctl(sk, cmd, arg);
1047
1048done:
1049	release_sock(sk);
1050	return err;
1051}
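/* Hypothetical userspace use of the device ioctls handled above
 * (illustration only, error handling elided):
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(dd, HCIDEVUP, 0);		// bring hci0 up (CAP_NET_ADMIN)
 *	ioctl(dd, HCIDEVDOWN, 0);	// and back down
 *
 * Note that the first ioctl on a fresh raw socket also generates
 * the cookie and the monitor open notification, as coded above.
 */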
1052
1053static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1054			 int addr_len)
1055{
1056	struct sockaddr_hci haddr;
1057	struct sock *sk = sock->sk;
1058	struct hci_dev *hdev = NULL;
1059	struct sk_buff *skb;
1060	int len, err = 0;
1061
1062	BT_DBG("sock %p sk %p", sock, sk);
1063
1064	if (!addr)
1065		return -EINVAL;
1066
1067	memset(&haddr, 0, sizeof(haddr));
1068	len = min_t(unsigned int, sizeof(haddr), addr_len);
1069	memcpy(&haddr, addr, len);
1070
1071	if (haddr.hci_family != AF_BLUETOOTH)
1072		return -EINVAL;
1073
1074	lock_sock(sk);
1075
1076	if (sk->sk_state == BT_BOUND) {
1077		err = -EALREADY;
1078		goto done;
1079	}
1080
1081	switch (haddr.hci_channel) {
1082	case HCI_CHANNEL_RAW:
1083		if (hci_pi(sk)->hdev) {
1084			err = -EALREADY;
1085			goto done;
1086		}
1087
1088		if (haddr.hci_dev != HCI_DEV_NONE) {
1089			hdev = hci_dev_get(haddr.hci_dev);
1090			if (!hdev) {
1091				err = -ENODEV;
1092				goto done;
1093			}
1094
1095			atomic_inc(&hdev->promisc);
1096		}
1097
1098		hci_pi(sk)->channel = haddr.hci_channel;
1099
1100		if (!hci_sock_gen_cookie(sk)) {
1101			/* In the case when a cookie has already been assigned,
1102			 * an ioctl has already been issued against an unbound
1103			 * socket and with that triggered an open notification.
1104			 * Send a close notification first to allow the state
1105			 * transition to bound.
1106			 */
1107			skb = create_monitor_ctrl_close(sk);
1108			if (skb) {
1109				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1110						    HCI_SOCK_TRUSTED, NULL);
1111				kfree_skb(skb);
1112			}
1113		}
1114
1115		if (capable(CAP_NET_ADMIN))
1116			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1117
1118		hci_pi(sk)->hdev = hdev;
1119
1120		/* Send event to monitor */
1121		skb = create_monitor_ctrl_open(sk);
1122		if (skb) {
1123			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1124					    HCI_SOCK_TRUSTED, NULL);
1125			kfree_skb(skb);
1126		}
1127		break;
1128
1129	case HCI_CHANNEL_USER:
1130		if (hci_pi(sk)->hdev) {
1131			err = -EALREADY;
1132			goto done;
1133		}
1134
1135		if (haddr.hci_dev == HCI_DEV_NONE) {
1136			err = -EINVAL;
1137			goto done;
1138		}
1139
1140		if (!capable(CAP_NET_ADMIN)) {
1141			err = -EPERM;
1142			goto done;
1143		}
1144
1145		hdev = hci_dev_get(haddr.hci_dev);
1146		if (!hdev) {
1147			err = -ENODEV;
1148			goto done;
1149		}
1150
1151		if (test_bit(HCI_INIT, &hdev->flags) ||
1152		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1153		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1154		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1155		     test_bit(HCI_UP, &hdev->flags))) {
1156			err = -EBUSY;
1157			hci_dev_put(hdev);
1158			goto done;
1159		}
1160
1161		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1162			err = -EUSERS;
1163			hci_dev_put(hdev);
1164			goto done;
1165		}
1166
1167		mgmt_index_removed(hdev);
1168
1169		err = hci_dev_open(hdev->id);
1170		if (err) {
1171			if (err == -EALREADY) {
1172				/* In case the transport is already up and
1173				 * running, clear the error here.
1174				 *
1175				 * This can happen when opening a user
1176				 * channel while the HCI_AUTO_OFF grace
1177				 * period is still active.
1178				 */
1179				err = 0;
1180			} else {
1181				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1182				mgmt_index_added(hdev);
1183				hci_dev_put(hdev);
1184				goto done;
1185			}
1186		}
1187
1188		hci_pi(sk)->channel = haddr.hci_channel;
1189
1190		if (!hci_sock_gen_cookie(sk)) {
1191			/* In the case when a cookie has already been assigned,
1192			 * this socket will transition from a raw socket into
1193			 * a user channel socket. For a clean transition, send
1194			 * the close notification first.
1195			 */
1196			skb = create_monitor_ctrl_close(sk);
1197			if (skb) {
1198				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1199						    HCI_SOCK_TRUSTED, NULL);
1200				kfree_skb(skb);
1201			}
1202		}
1203
1204		/* The user channel is restricted to CAP_NET_ADMIN
1205		 * capabilities and is therefore implicitly trusted.
1206		 */
1207		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1208
1209		hci_pi(sk)->hdev = hdev;
1210
1211		/* Send event to monitor */
1212		skb = create_monitor_ctrl_open(sk);
1213		if (skb) {
1214			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1215					    HCI_SOCK_TRUSTED, NULL);
1216			kfree_skb(skb);
1217		}
1218
1219		atomic_inc(&hdev->promisc);
1220		break;
1221
1222	case HCI_CHANNEL_MONITOR:
1223		if (haddr.hci_dev != HCI_DEV_NONE) {
1224			err = -EINVAL;
1225			goto done;
1226		}
1227
1228		if (!capable(CAP_NET_RAW)) {
1229			err = -EPERM;
1230			goto done;
1231		}
1232
1233		hci_pi(sk)->channel = haddr.hci_channel;
1234
1235		/* The monitor interface is restricted to CAP_NET_RAW
1236		 * capabilities and is therefore implicitly trusted.
1237		 */
1238		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1239
1240		send_monitor_note(sk, "Linux version %s (%s)",
1241				  init_utsname()->release,
1242				  init_utsname()->machine);
1243		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1244				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1245		send_monitor_replay(sk);
1246		send_monitor_control_replay(sk);
1247
1248		atomic_inc(&monitor_promisc);
1249		break;
1250
1251	case HCI_CHANNEL_LOGGING:
1252		if (haddr.hci_dev != HCI_DEV_NONE) {
1253			err = -EINVAL;
1254			goto done;
1255		}
1256
1257		if (!capable(CAP_NET_ADMIN)) {
1258			err = -EPERM;
1259			goto done;
1260		}
1261
1262		hci_pi(sk)->channel = haddr.hci_channel;
1263		break;
1264
1265	default:
1266		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1267			err = -EINVAL;
1268			goto done;
1269		}
1270
1271		if (haddr.hci_dev != HCI_DEV_NONE) {
1272			err = -EINVAL;
1273			goto done;
1274		}
1275
1276		/* Users with CAP_NET_ADMIN capabilities are allowed
1277		 * access to all management commands and events. For
1278		 * untrusted users the interface is restricted and only
1279		 * untrusted events are sent.
1280		 */
1281		if (capable(CAP_NET_ADMIN))
1282			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1283
1284		hci_pi(sk)->channel = haddr.hci_channel;
1285
1286		/* At the moment the index and unconfigured index events
1287		 * are enabled unconditionally. Setting them on each
1288		 * socket when binding keeps this functionality. They
1289		 * might, however, be cleared later, and sending of these
1290		 * events will then be disabled, but that is intentional.
1291		 *
1292		 * This also enables generic events that are safe to be
1293		 * received by untrusted users. Examples of such events
1294		 * are changes to settings, class of device, name, etc.
1295		 */
1296		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1297			if (!hci_sock_gen_cookie(sk)) {
1298				/* In the case when a cookie has already been
1299				 * assigned, this socket will transition from
1300				 * a raw socket into a control socket. To
1301				 * allow for a clean transition, send the
1302				 * close notification first.
1303				 */
1304				skb = create_monitor_ctrl_close(sk);
1305				if (skb) {
1306					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1307							    HCI_SOCK_TRUSTED, NULL);
1308					kfree_skb(skb);
1309				}
1310			}
1311
1312			/* Send event to monitor */
1313			skb = create_monitor_ctrl_open(sk);
1314			if (skb) {
1315				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1316						    HCI_SOCK_TRUSTED, NULL);
1317				kfree_skb(skb);
1318			}
1319
1320			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1321			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1322			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1323			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1324			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1325			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1326		}
1327		break;
1328	}
1329
1330	sk->sk_state = BT_BOUND;
1331
1332done:
1333	release_sock(sk);
1334	return err;
1335}
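/* Hypothetical userspace bind against the raw channel handled
 * above (illustration only):
 *
 *	struct sockaddr_hci addr;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.hci_family  = AF_BLUETOOTH;
 *	addr.hci_dev     = 0;			// hci0
 *	addr.hci_channel = HCI_CHANNEL_RAW;
 *	bind(dd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The raw channel also accepts HCI_DEV_NONE for a socket not bound
 * to any device; the user channel requires a real index, while the
 * monitor and logging channels require HCI_DEV_NONE, as coded above.
 */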
1336
1337static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1338			    int *addr_len, int peer)
1339{
1340	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1341	struct sock *sk = sock->sk;
1342	struct hci_dev *hdev;
1343	int err = 0;
1344
1345	BT_DBG("sock %p sk %p", sock, sk);
1346
1347	if (peer)
1348		return -EOPNOTSUPP;
1349
1350	lock_sock(sk);
1351
1352	hdev = hci_pi(sk)->hdev;
1353	if (!hdev) {
1354		err = -EBADFD;
1355		goto done;
1356	}
1357
1358	*addr_len = sizeof(*haddr);
1359	haddr->hci_family = AF_BLUETOOTH;
1360	haddr->hci_dev    = hdev->id;
1361	haddr->hci_channel = hci_pi(sk)->channel;
1362
1363done:
1364	release_sock(sk);
1365	return err;
1366}
1367
1368static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1369			  struct sk_buff *skb)
1370{
1371	__u32 mask = hci_pi(sk)->cmsg_mask;
1372
1373	if (mask & HCI_CMSG_DIR) {
1374		int incoming = bt_cb(skb)->incoming;
1375		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1376			 &incoming);
1377	}
1378
1379	if (mask & HCI_CMSG_TSTAMP) {
1380#ifdef CONFIG_COMPAT
1381		struct compat_timeval ctv;
1382#endif
1383		struct timeval tv;
1384		void *data;
1385		int len;
1386
1387		skb_get_timestamp(skb, &tv);
1388
1389		data = &tv;
1390		len = sizeof(tv);
1391#ifdef CONFIG_COMPAT
1392		if (!COMPAT_USE_64BIT_TIME &&
1393		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1394			ctv.tv_sec = tv.tv_sec;
1395			ctv.tv_usec = tv.tv_usec;
1396			data = &ctv;
1397			len = sizeof(ctv);
1398		}
1399#endif
1400
1401		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1402	}
1403}
1404
1405static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1406			    size_t len, int flags)
1407{
1408	int noblock = flags & MSG_DONTWAIT;
1409	struct sock *sk = sock->sk;
1410	struct sk_buff *skb;
1411	int copied, err;
1412	unsigned int skblen;
1413
1414	BT_DBG("sock %p, sk %p", sock, sk);
1415
1416	if (flags & MSG_OOB)
1417		return -EOPNOTSUPP;
1418
1419	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1420		return -EOPNOTSUPP;
1421
1422	if (sk->sk_state == BT_CLOSED)
1423		return 0;
1424
1425	skb = skb_recv_datagram(sk, flags, noblock, &err);
1426	if (!skb)
1427		return err;
1428
1429	skblen = skb->len;
1430	copied = skb->len;
1431	if (len < copied) {
1432		msg->msg_flags |= MSG_TRUNC;
1433		copied = len;
1434	}
1435
1436	skb_reset_transport_header(skb);
1437	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1438
1439	switch (hci_pi(sk)->channel) {
1440	case HCI_CHANNEL_RAW:
1441		hci_sock_cmsg(sk, msg, skb);
1442		break;
1443	case HCI_CHANNEL_USER:
1444	case HCI_CHANNEL_MONITOR:
1445		sock_recv_timestamp(msg, sk, skb);
1446		break;
1447	default:
1448		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1449			sock_recv_timestamp(msg, sk, skb);
1450		break;
1451	}
1452
1453	skb_free_datagram(sk, skb);
1454
1455	if (flags & MSG_TRUNC)
1456		copied = skblen;
1457
1458	return err ? : copied;
1459}
1460
1461static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1462			struct msghdr *msg, size_t msglen)
1463{
1464	void *buf;
1465	u8 *cp;
1466	struct mgmt_hdr *hdr;
1467	u16 opcode, index, len;
1468	struct hci_dev *hdev = NULL;
1469	const struct hci_mgmt_handler *handler;
1470	bool var_len, no_hdev;
1471	int err;
1472
1473	BT_DBG("got %zu bytes", msglen);
1474
1475	if (msglen < sizeof(*hdr))
1476		return -EINVAL;
1477
1478	buf = kmalloc(msglen, GFP_KERNEL);
1479	if (!buf)
1480		return -ENOMEM;
1481
1482	if (memcpy_from_msg(buf, msg, msglen)) {
1483		err = -EFAULT;
1484		goto done;
1485	}
1486
1487	hdr = buf;
1488	opcode = __le16_to_cpu(hdr->opcode);
1489	index = __le16_to_cpu(hdr->index);
1490	len = __le16_to_cpu(hdr->len);
1491
1492	if (len != msglen - sizeof(*hdr)) {
1493		err = -EINVAL;
1494		goto done;
1495	}
1496
1497	if (chan->channel == HCI_CHANNEL_CONTROL) {
1498		struct sk_buff *skb;
1499
1500		/* Send event to monitor */
1501		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1502						  buf + sizeof(*hdr));
1503		if (skb) {
1504			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1505					    HCI_SOCK_TRUSTED, NULL);
1506			kfree_skb(skb);
1507		}
1508	}
1509
1510	if (opcode >= chan->handler_count ||
1511	    chan->handlers[opcode].func == NULL) {
1512		BT_DBG("Unknown op %u", opcode);
1513		err = mgmt_cmd_status(sk, index, opcode,
1514				      MGMT_STATUS_UNKNOWN_COMMAND);
1515		goto done;
1516	}
1517
1518	handler = &chan->handlers[opcode];
1519
1520	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1521	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1522		err = mgmt_cmd_status(sk, index, opcode,
1523				      MGMT_STATUS_PERMISSION_DENIED);
1524		goto done;
1525	}
1526
1527	if (index != MGMT_INDEX_NONE) {
1528		hdev = hci_dev_get(index);
1529		if (!hdev) {
1530			err = mgmt_cmd_status(sk, index, opcode,
1531					      MGMT_STATUS_INVALID_INDEX);
1532			goto done;
1533		}
1534
1535		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1536		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1537		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1538			err = mgmt_cmd_status(sk, index, opcode,
1539					      MGMT_STATUS_INVALID_INDEX);
1540			goto done;
1541		}
1542
1543		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1544		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1545			err = mgmt_cmd_status(sk, index, opcode,
1546					      MGMT_STATUS_INVALID_INDEX);
1547			goto done;
1548		}
1549	}
1550
1551	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1552	if (no_hdev != !hdev) {
1553		err = mgmt_cmd_status(sk, index, opcode,
1554				      MGMT_STATUS_INVALID_INDEX);
1555		goto done;
1556	}
1557
1558	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1559	if ((var_len && len < handler->data_len) ||
1560	    (!var_len && len != handler->data_len)) {
1561		err = mgmt_cmd_status(sk, index, opcode,
1562				      MGMT_STATUS_INVALID_PARAMS);
1563		goto done;
1564	}
1565
1566	if (hdev && chan->hdev_init)
1567		chan->hdev_init(sk, hdev);
1568
1569	cp = buf + sizeof(*hdr);
1570
1571	err = handler->func(sk, hdev, cp, len);
1572	if (err < 0)
1573		goto done;
1574
1575	err = msglen;
1576
1577done:
1578	if (hdev)
1579		hci_dev_put(hdev);
1580
1581	kfree(buf);
1582	return err;
1583}
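/* Example frame for the parsing above: a Read Management Version
 * Information command (opcode 0x0001) takes no parameters and no
 * controller index, so the frame is the 6-byte header alone:
 *
 *	01 00 ff ff 00 00	// opcode 0x0001 (LE), index
 *				// MGMT_INDEX_NONE (0xffff), len 0
 *
 * len must equal msglen - sizeof(*hdr), otherwise the command is
 * rejected with -EINVAL before any handler runs.
 */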
1584
1585static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1586{
1587	struct hci_mon_hdr *hdr;
1588	struct sk_buff *skb;
1589	struct hci_dev *hdev;
1590	u16 index;
1591	int err;
1592
1593	/* The logging frame consists at minimum of the standard header,
1594	 * the priority byte, the ident length byte and at least one string
1595	 * terminator NUL byte. Anything shorter is an invalid packet.
1596	 */
1597	if (len < sizeof(*hdr) + 3)
1598		return -EINVAL;
1599
1600	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1601	if (!skb)
1602		return err;
1603
1604	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1605		err = -EFAULT;
1606		goto drop;
1607	}
1608
1609	hdr = (void *)skb->data;
1610
1611	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1612		err = -EINVAL;
1613		goto drop;
1614	}
1615
1616	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1617		__u8 priority = skb->data[sizeof(*hdr)];
1618		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1619
1620		/* Only priorities 0-7 are valid; any other value results
1621		 * in an invalid packet.
1622		 *
1623		 * The priority byte is followed by an ident length byte and
1624		 * the NUL-terminated ident string. Check that the ident
1625		 * length does not overflow the packet and that the ident
1626		 * string itself is NUL terminated. In case the ident length
1627		 * is zero, the length byte itself doubles as the NUL
1628		 * terminator.
1629		 *
1630		 * The message follows the ident string (if present) and
1631		 * must be NUL terminated. Otherwise it is not a valid packet.
1632		 */
1633		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1634		    ident_len > len - sizeof(*hdr) - 3 ||
1635		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1636			err = -EINVAL;
1637			goto drop;
1638		}
1639	} else {
1640		err = -EINVAL;
1641		goto drop;
1642	}
1643
1644	index = __le16_to_cpu(hdr->index);
1645
1646	if (index != MGMT_INDEX_NONE) {
1647		hdev = hci_dev_get(index);
1648		if (!hdev) {
1649			err = -ENODEV;
1650			goto drop;
1651		}
1652	} else {
1653		hdev = NULL;
1654	}
1655
1656	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1657
1658	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1659	err = len;
1660
1661	if (hdev)
1662		hci_dev_put(hdev);
1663
1664drop:
1665	kfree_skb(skb);
1666	return err;
1667}
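/* Example of a minimal valid logging frame for the checks above
 * (hypothetical payload): after the header with opcode 0x0000 come
 * the priority byte, the ident length byte, the NUL-terminated
 * ident and the NUL-terminated message:
 *
 *	06 04 't' 'a' 'g' 00 'h' 'i' 00
 *
 * Here the priority is 6, the ident length is 4 (counting the
 * ident's NUL) and hdr->len is 9. The kernel rewrites the opcode
 * to HCI_MON_USER_LOGGING and forwards the frame to all monitor
 * sockets.
 */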
1668
1669static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1670			    size_t len)
1671{
1672	struct sock *sk = sock->sk;
1673	struct hci_mgmt_chan *chan;
1674	struct hci_dev *hdev;
1675	struct sk_buff *skb;
1676	int err;
1677
1678	BT_DBG("sock %p sk %p", sock, sk);
1679
1680	if (msg->msg_flags & MSG_OOB)
1681		return -EOPNOTSUPP;
1682
1683	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1684		return -EINVAL;
1685
1686	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1687		return -EINVAL;
1688
1689	lock_sock(sk);
1690
1691	switch (hci_pi(sk)->channel) {
1692	case HCI_CHANNEL_RAW:
1693	case HCI_CHANNEL_USER:
1694		break;
1695	case HCI_CHANNEL_MONITOR:
1696		err = -EOPNOTSUPP;
1697		goto done;
1698	case HCI_CHANNEL_LOGGING:
1699		err = hci_logging_frame(sk, msg, len);
1700		goto done;
1701	default:
1702		mutex_lock(&mgmt_chan_list_lock);
1703		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1704		if (chan)
1705			err = hci_mgmt_cmd(chan, sk, msg, len);
1706		else
1707			err = -EINVAL;
1708
1709		mutex_unlock(&mgmt_chan_list_lock);
1710		goto done;
1711	}
1712
1713	hdev = hci_pi(sk)->hdev;
1714	if (!hdev) {
1715		err = -EBADFD;
1716		goto done;
1717	}
1718
1719	if (!test_bit(HCI_UP, &hdev->flags)) {
1720		err = -ENETDOWN;
1721		goto done;
1722	}
1723
1724	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1725	if (!skb)
1726		goto done;
1727
1728	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1729		err = -EFAULT;
1730		goto drop;
1731	}
1732
1733	hci_skb_pkt_type(skb) = skb->data[0];
1734	skb_pull(skb, 1);
1735
1736	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1737		/* No permission check is needed for the user channel
1738		 * since that gets enforced when binding the socket.
1739		 *
1740		 * However check that the packet type is valid.
1741		 * However, check that the packet type is valid.
1742		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1743		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1744		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1745			err = -EINVAL;
1746			goto drop;
1747		}
1748
1749		skb_queue_tail(&hdev->raw_q, skb);
1750		queue_work(hdev->workqueue, &hdev->tx_work);
1751	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1752		u16 opcode = get_unaligned_le16(skb->data);
1753		u16 ogf = hci_opcode_ogf(opcode);
1754		u16 ocf = hci_opcode_ocf(opcode);
1755
1756		if (((ogf > HCI_SFLT_MAX_OGF) ||
1757		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1758				   &hci_sec_filter.ocf_mask[ogf])) &&
1759		    !capable(CAP_NET_RAW)) {
1760			err = -EPERM;
1761			goto drop;
1762		}
1763
1764		/* Since the opcode has already been extracted here, store
1765		 * a copy of the value for later use by the drivers.
1766		 */
1767		hci_skb_opcode(skb) = opcode;
1768
1769		if (ogf == 0x3f) {
1770			skb_queue_tail(&hdev->raw_q, skb);
1771			queue_work(hdev->workqueue, &hdev->tx_work);
1772		} else {
1773			/* Stand-alone HCI commands must be flagged as
1774			 * single-command requests.
1775			 */
1776			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1777
1778			skb_queue_tail(&hdev->cmd_q, skb);
1779			queue_work(hdev->workqueue, &hdev->cmd_work);
1780		}
1781	} else {
1782		if (!capable(CAP_NET_RAW)) {
1783			err = -EPERM;
1784			goto drop;
1785		}
1786
1787		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1788		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1789			err = -EINVAL;
1790			goto drop;
1791		}
1792
1793		skb_queue_tail(&hdev->raw_q, skb);
1794		queue_work(hdev->workqueue, &hdev->tx_work);
1795	}
1796
1797	err = len;
1798
1799done:
1800	release_sock(sk);
1801	return err;
1802
1803drop:
1804	kfree_skb(skb);
1805	goto done;
1806}
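/* Hypothetical userspace send over the raw channel handled above
 * (illustration only): Read Local Version Information is OGF 0x04,
 * OCF 0x0001, i.e. opcode 0x1001, and travels as the packet type
 * byte followed by the little-endian opcode and a zero parameter
 * length:
 *
 *	unsigned char cmd[] = { 0x01, 0x01, 0x10, 0x00 };
 *
 *	write(dd, cmd, sizeof(cmd));	// 0x01 == HCI_COMMAND_PKT
 *
 * This OGF/OCF pair has its bit set in hci_sec_filter.ocf_mask
 * (the OGF_INFO_PARAM row), so the write is permitted even without
 * CAP_NET_RAW.
 */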
1807
1808static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1809			       char __user *optval, unsigned int len)
1810{
1811	struct hci_ufilter uf = { .opcode = 0 };
1812	struct sock *sk = sock->sk;
1813	int err = 0, opt = 0;
1814
1815	BT_DBG("sk %p, opt %d", sk, optname);
1816
1817	if (level != SOL_HCI)
1818		return -ENOPROTOOPT;
1819
1820	lock_sock(sk);
1821
1822	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1823		err = -EBADFD;
1824		goto done;
1825	}
1826
1827	switch (optname) {
1828	case HCI_DATA_DIR:
1829		if (get_user(opt, (int __user *)optval)) {
1830			err = -EFAULT;
1831			break;
1832		}
1833
1834		if (opt)
1835			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1836		else
1837			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1838		break;
1839
1840	case HCI_TIME_STAMP:
1841		if (get_user(opt, (int __user *)optval)) {
1842			err = -EFAULT;
1843			break;
1844		}
1845
1846		if (opt)
1847			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1848		else
1849			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1850		break;
1851
1852	case HCI_FILTER:
1853		{
1854			struct hci_filter *f = &hci_pi(sk)->filter;
1855
1856			uf.type_mask = f->type_mask;
1857			uf.opcode    = f->opcode;
1858			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1859			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1860		}
1861
1862		len = min_t(unsigned int, len, sizeof(uf));
1863		if (copy_from_user(&uf, optval, len)) {
1864			err = -EFAULT;
1865			break;
1866		}
1867
1868		if (!capable(CAP_NET_RAW)) {
1869			uf.type_mask &= hci_sec_filter.type_mask;
1870			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1871			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1872		}
1873
1874		{
1875			struct hci_filter *f = &hci_pi(sk)->filter;
1876
1877			f->type_mask = uf.type_mask;
1878			f->opcode    = uf.opcode;
1879			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1880			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1881		}
1882		break;
1883
1884	default:
1885		err = -ENOPROTOOPT;
1886		break;
1887	}
1888
1889done:
1890	release_sock(sk);
1891	return err;
1892}
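/* Hypothetical userspace counterpart of the HCI_FILTER option
 * above, using the BlueZ helper macros (illustration only):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(dd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW the requested masks are silently narrowed to
 * hci_sec_filter, as done above.
 */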
1893
1894static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1895			       char __user *optval, int __user *optlen)
1896{
1897	struct hci_ufilter uf;
1898	struct sock *sk = sock->sk;
1899	int len, opt, err = 0;
1900
1901	BT_DBG("sk %p, opt %d", sk, optname);
1902
1903	if (level != SOL_HCI)
1904		return -ENOPROTOOPT;
1905
1906	if (get_user(len, optlen))
1907		return -EFAULT;
1908
1909	lock_sock(sk);
1910
1911	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1912		err = -EBADFD;
1913		goto done;
1914	}
1915
1916	switch (optname) {
1917	case HCI_DATA_DIR:
1918		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1919			opt = 1;
1920		else
1921			opt = 0;
1922
1923		if (put_user(opt, optval))
1924			err = -EFAULT;
1925		break;
1926
1927	case HCI_TIME_STAMP:
1928		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1929			opt = 1;
1930		else
1931			opt = 0;
1932
1933		if (put_user(opt, optval))
1934			err = -EFAULT;
1935		break;
1936
1937	case HCI_FILTER:
1938		{
1939			struct hci_filter *f = &hci_pi(sk)->filter;
1940
1941			memset(&uf, 0, sizeof(uf));
1942			uf.type_mask = f->type_mask;
1943			uf.opcode    = f->opcode;
1944			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1945			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1946		}
1947
1948		len = min_t(unsigned int, len, sizeof(uf));
1949		if (copy_to_user(optval, &uf, len))
1950			err = -EFAULT;
1951		break;
1952
1953	default:
1954		err = -ENOPROTOOPT;
1955		break;
1956	}
1957
1958done:
1959	release_sock(sk);
1960	return err;
1961}
1962
1963static const struct proto_ops hci_sock_ops = {
1964	.family		= PF_BLUETOOTH,
1965	.owner		= THIS_MODULE,
1966	.release	= hci_sock_release,
1967	.bind		= hci_sock_bind,
1968	.getname	= hci_sock_getname,
1969	.sendmsg	= hci_sock_sendmsg,
1970	.recvmsg	= hci_sock_recvmsg,
1971	.ioctl		= hci_sock_ioctl,
1972	.poll		= datagram_poll,
1973	.listen		= sock_no_listen,
1974	.shutdown	= sock_no_shutdown,
1975	.setsockopt	= hci_sock_setsockopt,
1976	.getsockopt	= hci_sock_getsockopt,
1977	.connect	= sock_no_connect,
1978	.socketpair	= sock_no_socketpair,
1979	.accept		= sock_no_accept,
1980	.mmap		= sock_no_mmap
1981};
1982
1983static struct proto hci_sk_proto = {
1984	.name		= "HCI",
1985	.owner		= THIS_MODULE,
1986	.obj_size	= sizeof(struct hci_pinfo)
1987};
1988
1989static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1990			   int kern)
1991{
1992	struct sock *sk;
1993
1994	BT_DBG("sock %p", sock);
1995
1996	if (sock->type != SOCK_RAW)
1997		return -ESOCKTNOSUPPORT;
1998
1999	sock->ops = &hci_sock_ops;
2000
2001	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2002	if (!sk)
2003		return -ENOMEM;
2004
2005	sock_init_data(sock, sk);
2006
2007	sock_reset_flag(sk, SOCK_ZAPPED);
2008
2009	sk->sk_protocol = protocol;
2010
2011	sock->state = SS_UNCONNECTED;
2012	sk->sk_state = BT_OPEN;
2013
2014	bt_sock_link(&hci_sk_list, sk);
2015	return 0;
2016}
2017
2018static const struct net_proto_family hci_sock_family_ops = {
2019	.family	= PF_BLUETOOTH,
2020	.owner	= THIS_MODULE,
2021	.create	= hci_sock_create,
2022};
2023
2024int __init hci_sock_init(void)
2025{
2026	int err;
2027
2028	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2029
2030	err = proto_register(&hci_sk_proto, 0);
2031	if (err < 0)
2032		return err;
2033
2034	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2035	if (err < 0) {
2036		BT_ERR("HCI socket registration failed");
2037		goto error;
2038	}
2039
2040	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2041	if (err < 0) {
2042		BT_ERR("Failed to create HCI proc file");
2043		bt_sock_unregister(BTPROTO_HCI);
2044		goto error;
2045	}
2046
2047	BT_INFO("HCI socket layer initialized");
2048
2049	return 0;
2050
2051error:
2052	proto_unregister(&hci_sk_proto);
2053	return err;
2054}
2055
2056void hci_sock_cleanup(void)
2057{
2058	bt_procfs_cleanup(&init_net, "hci");
2059	bt_sock_unregister(BTPROTO_HCI);
2060	proto_unregister(&hci_sk_proto);
2061}