net/bluetooth/hci_sock.c (v4.6)
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <asm/unaligned.h>
  30
  31#include <net/bluetooth/bluetooth.h>
  32#include <net/bluetooth/hci_core.h>
  33#include <net/bluetooth/hci_mon.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "mgmt_util.h"
  37
  38static LIST_HEAD(mgmt_chan_list);
  39static DEFINE_MUTEX(mgmt_chan_list_lock);
  40
  41static atomic_t monitor_promisc = ATOMIC_INIT(0);
  42
  43/* ----- HCI socket interface ----- */
  44
  45/* Socket info */
  46#define hci_pi(sk) ((struct hci_pinfo *) sk)
  47
  48struct hci_pinfo {
  49	struct bt_sock    bt;
  50	struct hci_dev    *hdev;
  51	struct hci_filter filter;
  52	__u32             cmsg_mask;
  53	unsigned short    channel;
  54	unsigned long     flags;
  55};
  56
  57void hci_sock_set_flag(struct sock *sk, int nr)
  58{
  59	set_bit(nr, &hci_pi(sk)->flags);
  60}
  61
  62void hci_sock_clear_flag(struct sock *sk, int nr)
  63{
  64	clear_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67int hci_sock_test_flag(struct sock *sk, int nr)
  68{
  69	return test_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72unsigned short hci_sock_get_channel(struct sock *sk)
  73{
  74	return hci_pi(sk)->channel;
  75}
  76
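/* Note (editorial sketch, not part of the file): the hci_pi() cast in
 * the macro above is safe because struct hci_pinfo embeds struct
 * bt_sock as its first member, and struct bt_sock in turn begins with
 * struct sock, so the protocol-private state is reachable straight
 * from a struct sock pointer without a container_of() lookup.
 */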
  77static inline int hci_test_bit(int nr, const void *addr)
  78{
  79	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
  80}
  81
  82/* Security filter */
  83#define HCI_SFLT_MAX_OGF  5
  84
  85struct hci_sec_filter {
  86	__u32 type_mask;
  87	__u32 event_mask[2];
  88	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
  89};
  90
  91static const struct hci_sec_filter hci_sec_filter = {
  92	/* Packet types */
  93	0x10,
  94	/* Events */
  95	{ 0x1000d9fe, 0x0000b00c },
  96	/* Commands */
  97	{
  98		{ 0x0 },
  99		/* OGF_LINK_CTL */
 100		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 101		/* OGF_LINK_POLICY */
 102		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 103		/* OGF_HOST_CTL */
 104		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 105		/* OGF_INFO_PARAM */
 106		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 107		/* OGF_STATUS_PARAM */
 108		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 109	}
 110};
 111
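/* Worked example (editorial, not part of the file): the Inquiry command
 * has opcode 0x0401, i.e. OGF 0x01 (OGF_LINK_CTL) and OCF 0x0001.  The
 * sendmsg() path below evaluates
 *
 *	hci_test_bit(0x0001 & HCI_FLT_OCF_BITS,
 *		     &hci_sec_filter.ocf_mask[0x01]);
 *
 * Bit 1 of 0xbe000006 is set, so an unprivileged raw socket may issue
 * Inquiry; a command whose bit is clear requires CAP_NET_RAW instead.
 */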
 112static struct bt_sock_list hci_sk_list = {
 113	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 114};
 115
 116static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 117{
 118	struct hci_filter *flt;
 119	int flt_type, flt_event;
 120
 121	/* Apply filter */
 122	flt = &hci_pi(sk)->filter;
 123
 124	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 125
 126	if (!test_bit(flt_type, &flt->type_mask))
 127		return true;
 128
 129	/* Extra filter for event packets only */
 130	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 131		return false;
 132
 133	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 134
 135	if (!hci_test_bit(flt_event, &flt->event_mask))
 136		return true;
 137
 138	/* Check filter only when opcode is set */
 139	if (!flt->opcode)
 140		return false;
 141
 142	if (flt_event == HCI_EV_CMD_COMPLETE &&
 143	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 144		return true;
 145
 146	if (flt_event == HCI_EV_CMD_STATUS &&
 147	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 148		return true;
 149
 150	return false;
 151}
 152
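/* Editorial summary (not part of the file): is_filtered_packet()
 * returns true when the frame must be dropped.  It narrows in stages:
 * first the packet-type mask, then, for HCI_EVENT_PKT only, the event
 * mask, and finally, if an opcode filter is armed, the opcode carried
 * in Command Complete (offset 3) or Command Status (offset 4) must
 * match the one the socket asked for.
 */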
 153/* Send frame to RAW socket */
 154void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 155{
 156	struct sock *sk;
 157	struct sk_buff *skb_copy = NULL;
 158
 159	BT_DBG("hdev %p len %d", hdev, skb->len);
 160
 161	read_lock(&hci_sk_list.lock);
 162
 163	sk_for_each(sk, &hci_sk_list.head) {
 164		struct sk_buff *nskb;
 165
 166		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 167			continue;
 168
 169		/* Don't send frame to the socket it came from */
 170		if (skb->sk == sk)
 171			continue;
 172
 173		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 174			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 175			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 176			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 177			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 178				continue;
 179			if (is_filtered_packet(sk, skb))
 180				continue;
 181		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 182			if (!bt_cb(skb)->incoming)
 183				continue;
 184			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 185			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 186			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 187				continue;
 188		} else {
 189			/* Don't send frame to other channel types */
 190			continue;
 191		}
 192
 193		if (!skb_copy) {
 194			/* Create a private copy with headroom */
 195			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 196			if (!skb_copy)
 197				continue;
 198
 199			/* Put type byte before the data */
 200			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 201		}
 202
 203		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 204		if (!nskb)
 205			continue;
 206
 207		if (sock_queue_rcv_skb(sk, nskb))
 208			kfree_skb(nskb);
 209	}
 210
 211	read_unlock(&hci_sk_list.lock);
 212
 213	kfree_skb(skb_copy);
 214}
 215
 216/* Send frame to sockets with specific channel */
 217void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 218			 int flag, struct sock *skip_sk)
 219{
 220	struct sock *sk;
 221
 222	BT_DBG("channel %u len %d", channel, skb->len);
 223
 224	read_lock(&hci_sk_list.lock);
 225
 226	sk_for_each(sk, &hci_sk_list.head) {
 227		struct sk_buff *nskb;
 228
 229		/* Ignore socket without the flag set */
 230		if (!hci_sock_test_flag(sk, flag))
 231			continue;
 232
 233		/* Skip the original socket */
 234		if (sk == skip_sk)
 235			continue;
 236
 237		if (sk->sk_state != BT_BOUND)
 238			continue;
 239
 240		if (hci_pi(sk)->channel != channel)
 241			continue;
 242
 243		nskb = skb_clone(skb, GFP_ATOMIC);
 244		if (!nskb)
 245			continue;
 246
 247		if (sock_queue_rcv_skb(sk, nskb))
 248			kfree_skb(nskb);
 249	}
 250
 251	read_unlock(&hci_sk_list.lock);
 252}
 253
 254/* Send frame to monitor socket */
 255void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 256{
 257	struct sk_buff *skb_copy = NULL;
 258	struct hci_mon_hdr *hdr;
 259	__le16 opcode;
 260
 261	if (!atomic_read(&monitor_promisc))
 262		return;
 263
 264	BT_DBG("hdev %p len %d", hdev, skb->len);
 265
 266	switch (hci_skb_pkt_type(skb)) {
 267	case HCI_COMMAND_PKT:
 268		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 269		break;
 270	case HCI_EVENT_PKT:
 271		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 272		break;
 273	case HCI_ACLDATA_PKT:
 274		if (bt_cb(skb)->incoming)
 275			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 276		else
 277			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 278		break;
 279	case HCI_SCODATA_PKT:
 280		if (bt_cb(skb)->incoming)
 281			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 282		else
 283			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 284		break;
 285	case HCI_DIAG_PKT:
 286		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 287		break;
 288	default:
 289		return;
 290	}
 291
 292	/* Create a private copy with headroom */
 293	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 294	if (!skb_copy)
 295		return;
 296
 297	/* Put header before the data */
 298	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 299	hdr->opcode = opcode;
 300	hdr->index = cpu_to_le16(hdev->id);
 301	hdr->len = cpu_to_le16(skb->len);
 302
 303	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 304			    HCI_SOCK_TRUSTED, NULL);
 305	kfree_skb(skb_copy);
 306}
 307
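/* Userspace sketch (editorial; assumes the uapi constants from
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h>): this is how a tracer
 * such as btmon attaches to the monitor channel fed by the function
 * above:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC,
 *			BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * After this, read() yields hci_mon_hdr-framed traffic for every
 * controller; binding requires CAP_NET_RAW, as enforced in
 * hci_sock_bind() below.
 */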
 308static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 309{
 310	struct hci_mon_hdr *hdr;
 311	struct hci_mon_new_index *ni;
 312	struct hci_mon_index_info *ii;
 313	struct sk_buff *skb;
 314	__le16 opcode;
 315
 316	switch (event) {
 317	case HCI_DEV_REG:
 318		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 319		if (!skb)
 320			return NULL;
 321
 322		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 323		ni->type = hdev->dev_type;
 324		ni->bus = hdev->bus;
 325		bacpy(&ni->bdaddr, &hdev->bdaddr);
 326		memcpy(ni->name, hdev->name, 8);
 327
 328		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 329		break;
 330
 331	case HCI_DEV_UNREG:
 332		skb = bt_skb_alloc(0, GFP_ATOMIC);
 333		if (!skb)
 334			return NULL;
 335
 336		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 337		break;
 338
 339	case HCI_DEV_SETUP:
 340		if (hdev->manufacturer == 0xffff)
 341			return NULL;
 342
 343		/* fall through */
 344
 345	case HCI_DEV_UP:
 346		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 347		if (!skb)
 348			return NULL;
 349
 350		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 351		bacpy(&ii->bdaddr, &hdev->bdaddr);
 352		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 353
 354		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 355		break;
 356
 357	case HCI_DEV_OPEN:
 358		skb = bt_skb_alloc(0, GFP_ATOMIC);
 359		if (!skb)
 360			return NULL;
 361
 362		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 363		break;
 364
 365	case HCI_DEV_CLOSE:
 366		skb = bt_skb_alloc(0, GFP_ATOMIC);
 367		if (!skb)
 368			return NULL;
 369
 370		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 371		break;
 372
 373	default:
 374		return NULL;
 375	}
 376
 377	__net_timestamp(skb);
 378
 379	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 380	hdr->opcode = opcode;
 381	hdr->index = cpu_to_le16(hdev->id);
 382	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 383
 384	return skb;
 385}
 386
 387static void __printf(2, 3)
 388send_monitor_note(struct sock *sk, const char *fmt, ...)
 389{
 390	size_t len;
 391	struct hci_mon_hdr *hdr;
 392	struct sk_buff *skb;
 393	va_list args;
 394
 395	va_start(args, fmt);
 396	len = vsnprintf(NULL, 0, fmt, args);
 397	va_end(args);
 398
 399	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 400	if (!skb)
 401		return;
 402
 403	va_start(args, fmt);
 404	vsprintf(skb_put(skb, len), fmt, args);
 405	*skb_put(skb, 1) = 0;
 406	va_end(args);
 407
 408	__net_timestamp(skb);
 409
 410	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 411	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 412	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 413	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 414
 415	if (sock_queue_rcv_skb(sk, skb))
 416		kfree_skb(skb);
 417}
 418
 419static void send_monitor_replay(struct sock *sk)
 420{
 421	struct hci_dev *hdev;
 422
 423	read_lock(&hci_dev_list_lock);
 424
 425	list_for_each_entry(hdev, &hci_dev_list, list) {
 426		struct sk_buff *skb;
 427
 428		skb = create_monitor_event(hdev, HCI_DEV_REG);
 429		if (!skb)
 430			continue;
 431
 432		if (sock_queue_rcv_skb(sk, skb))
 433			kfree_skb(skb);
 434
 435		if (!test_bit(HCI_RUNNING, &hdev->flags))
 436			continue;
 437
 438		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 439		if (!skb)
 440			continue;
 441
 442		if (sock_queue_rcv_skb(sk, skb))
 443			kfree_skb(skb);
 444
 445		if (test_bit(HCI_UP, &hdev->flags))
 446			skb = create_monitor_event(hdev, HCI_DEV_UP);
 447		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 448			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 449		else
 450			skb = NULL;
 451
 452		if (skb) {
 453			if (sock_queue_rcv_skb(sk, skb))
 454				kfree_skb(skb);
 455		}
 456	}
 457
 458	read_unlock(&hci_dev_list_lock);
 459}
 460
 461/* Generate internal stack event */
 462static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 463{
 464	struct hci_event_hdr *hdr;
 465	struct hci_ev_stack_internal *ev;
 466	struct sk_buff *skb;
 467
 468	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 469	if (!skb)
 470		return;
 471
 472	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 473	hdr->evt  = HCI_EV_STACK_INTERNAL;
 474	hdr->plen = sizeof(*ev) + dlen;
 475
 476	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 477	ev->type = type;
 478	memcpy(ev->data, data, dlen);
 479
 480	bt_cb(skb)->incoming = 1;
 481	__net_timestamp(skb);
 482
 483	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 484	hci_send_to_sock(hdev, skb);
 485	kfree_skb(skb);
 486}
 487
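/* Editorial note (not part of the file): these synthetic frames reach
 * raw sockets as ordinary HCI events with evt == HCI_EV_STACK_INTERNAL
 * (0xfd), so userspace that wants device add/remove notifications on a
 * raw socket must keep that event enabled in its filter.
 */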
 488void hci_sock_dev_event(struct hci_dev *hdev, int event)
 489{
 490	BT_DBG("hdev %s event %d", hdev->name, event);
 491
 492	if (atomic_read(&monitor_promisc)) {
 493		struct sk_buff *skb;
 494
 495		/* Send event to monitor */
 496		skb = create_monitor_event(hdev, event);
 497		if (skb) {
 498			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 499					    HCI_SOCK_TRUSTED, NULL);
 500			kfree_skb(skb);
 501		}
 502	}
 503
 504	if (event <= HCI_DEV_DOWN) {
 505		struct hci_ev_si_device ev;
 506
 507		/* Send event to sockets */
 508		ev.event  = event;
 509		ev.dev_id = hdev->id;
 510		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 511	}
 512
 513	if (event == HCI_DEV_UNREG) {
 514		struct sock *sk;
 515
 516		/* Detach sockets from device */
 517		read_lock(&hci_sk_list.lock);
 518		sk_for_each(sk, &hci_sk_list.head) {
 519			bh_lock_sock_nested(sk);
 520			if (hci_pi(sk)->hdev == hdev) {
 521				hci_pi(sk)->hdev = NULL;
 522				sk->sk_err = EPIPE;
 523				sk->sk_state = BT_OPEN;
 524				sk->sk_state_change(sk);
 525
 526				hci_dev_put(hdev);
 527			}
 528			bh_unlock_sock(sk);
 529		}
 530		read_unlock(&hci_sk_list.lock);
 531	}
 532}
 533
 534static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 535{
 536	struct hci_mgmt_chan *c;
 537
 538	list_for_each_entry(c, &mgmt_chan_list, list) {
 539		if (c->channel == channel)
 540			return c;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 547{
 548	struct hci_mgmt_chan *c;
 549
 550	mutex_lock(&mgmt_chan_list_lock);
 551	c = __hci_mgmt_chan_find(channel);
 552	mutex_unlock(&mgmt_chan_list_lock);
 553
 554	return c;
 555}
 556
 557int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 558{
 559	if (c->channel < HCI_CHANNEL_CONTROL)
 560		return -EINVAL;
 561
 562	mutex_lock(&mgmt_chan_list_lock);
 563	if (__hci_mgmt_chan_find(c->channel)) {
 564		mutex_unlock(&mgmt_chan_list_lock);
 565		return -EALREADY;
 566	}
 567
 568	list_add_tail(&c->list, &mgmt_chan_list);
 569
 570	mutex_unlock(&mgmt_chan_list_lock);
 571
 572	return 0;
 573}
 574EXPORT_SYMBOL(hci_mgmt_chan_register);
 575
 576void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 577{
 578	mutex_lock(&mgmt_chan_list_lock);
 579	list_del(&c->list);
 580	mutex_unlock(&mgmt_chan_list_lock);
 581}
 582EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 583
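/* Caller sketch (editorial; mirrors how net/bluetooth/mgmt.c registers
 * the control channel, but with hypothetical handler names):
 *
 *	static struct hci_mgmt_chan control_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(my_handlers),
 *		.handlers	= my_handlers,
 *		.hdev_init	= my_hdev_init,
 *	};
 *
 *	err = hci_mgmt_chan_register(&control_chan);
 *
 * Registration rejects channels below HCI_CHANNEL_CONTROL with -EINVAL
 * and an already-claimed channel with -EALREADY.
 */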
 584static int hci_sock_release(struct socket *sock)
 585{
 586	struct sock *sk = sock->sk;
 587	struct hci_dev *hdev;
 588
 589	BT_DBG("sock %p sk %p", sock, sk);
 590
 591	if (!sk)
 592		return 0;
 593
 594	hdev = hci_pi(sk)->hdev;
 595
 596	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
 597		atomic_dec(&monitor_promisc);
 598
 599	bt_sock_unlink(&hci_sk_list, sk);
 600
 601	if (hdev) {
 602		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
  603			/* When releasing a user channel's exclusive access,
 604			 * call hci_dev_do_close directly instead of calling
 605			 * hci_dev_close to ensure the exclusive access will
 606			 * be released and the controller brought back down.
 607			 *
 608			 * The checking of HCI_AUTO_OFF is not needed in this
 609			 * case since it will have been cleared already when
 610			 * opening the user channel.
 611			 */
 612			hci_dev_do_close(hdev);
 613			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 614			mgmt_index_added(hdev);
 615		}
 616
 617		atomic_dec(&hdev->promisc);
 618		hci_dev_put(hdev);
 619	}
 620
 621	sock_orphan(sk);
 622
 623	skb_queue_purge(&sk->sk_receive_queue);
 624	skb_queue_purge(&sk->sk_write_queue);
 625
 626	sock_put(sk);
 627	return 0;
 628}
 629
 630static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 631{
 632	bdaddr_t bdaddr;
 633	int err;
 634
 635	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 636		return -EFAULT;
 637
 638	hci_dev_lock(hdev);
 639
 640	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 641
 642	hci_dev_unlock(hdev);
 643
 644	return err;
 645}
 646
 647static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 648{
 649	bdaddr_t bdaddr;
 650	int err;
 651
 652	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 653		return -EFAULT;
 654
 655	hci_dev_lock(hdev);
 656
 657	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 658
 659	hci_dev_unlock(hdev);
 660
 661	return err;
 662}
 663
 664/* Ioctls that require bound socket */
 665static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 666				unsigned long arg)
 667{
 668	struct hci_dev *hdev = hci_pi(sk)->hdev;
 669
 670	if (!hdev)
 671		return -EBADFD;
 672
 673	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 674		return -EBUSY;
 675
 676	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 677		return -EOPNOTSUPP;
 678
 679	if (hdev->dev_type != HCI_BREDR)
 680		return -EOPNOTSUPP;
 681
 682	switch (cmd) {
 683	case HCISETRAW:
 684		if (!capable(CAP_NET_ADMIN))
 685			return -EPERM;
 686		return -EOPNOTSUPP;
 687
 688	case HCIGETCONNINFO:
 689		return hci_get_conn_info(hdev, (void __user *)arg);
 690
 691	case HCIGETAUTHINFO:
 692		return hci_get_auth_info(hdev, (void __user *)arg);
 693
 694	case HCIBLOCKADDR:
 695		if (!capable(CAP_NET_ADMIN))
 696			return -EPERM;
 697		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 698
 699	case HCIUNBLOCKADDR:
 700		if (!capable(CAP_NET_ADMIN))
 701			return -EPERM;
 702		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 703	}
 704
 705	return -ENOIOCTLCMD;
 706}
 707
 708static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 709			  unsigned long arg)
 710{
 711	void __user *argp = (void __user *)arg;
 712	struct sock *sk = sock->sk;
 713	int err;
 714
 715	BT_DBG("cmd %x arg %lx", cmd, arg);
 716
 717	lock_sock(sk);
 718
 719	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 720		err = -EBADFD;
 721		goto done;
 722	}
 723
 724	release_sock(sk);
 725
 726	switch (cmd) {
 727	case HCIGETDEVLIST:
 728		return hci_get_dev_list(argp);
 729
 730	case HCIGETDEVINFO:
 731		return hci_get_dev_info(argp);
 732
 733	case HCIGETCONNLIST:
 734		return hci_get_conn_list(argp);
 735
 736	case HCIDEVUP:
 737		if (!capable(CAP_NET_ADMIN))
 738			return -EPERM;
 739		return hci_dev_open(arg);
 740
 741	case HCIDEVDOWN:
 742		if (!capable(CAP_NET_ADMIN))
 743			return -EPERM;
 744		return hci_dev_close(arg);
 745
 746	case HCIDEVRESET:
 747		if (!capable(CAP_NET_ADMIN))
 748			return -EPERM;
 749		return hci_dev_reset(arg);
 750
 751	case HCIDEVRESTAT:
 752		if (!capable(CAP_NET_ADMIN))
 753			return -EPERM;
 754		return hci_dev_reset_stat(arg);
 755
 756	case HCISETSCAN:
 757	case HCISETAUTH:
 758	case HCISETENCRYPT:
 759	case HCISETPTYPE:
 760	case HCISETLINKPOL:
 761	case HCISETLINKMODE:
 762	case HCISETACLMTU:
 763	case HCISETSCOMTU:
 764		if (!capable(CAP_NET_ADMIN))
 765			return -EPERM;
 766		return hci_dev_cmd(cmd, argp);
 767
 768	case HCIINQUIRY:
 769		return hci_inquiry(argp);
 770	}
 771
 772	lock_sock(sk);
 773
 774	err = hci_sock_bound_ioctl(sk, cmd, arg);
 775
 776done:
 777	release_sock(sk);
 778	return err;
 779}
 780
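/* Userspace sketch (editorial; HCIDEVUP comes from the same uapi
 * header as the other ioctls above): bringing up controller hci0 via
 * the unbound-ioctl path, which needs CAP_NET_ADMIN but no bind():
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(fd, HCIDEVUP, 0) < 0)		// 0 == device id of hci0
 *		perror("HCIDEVUP");
 */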
 781static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 782			 int addr_len)
 783{
 784	struct sockaddr_hci haddr;
 785	struct sock *sk = sock->sk;
 786	struct hci_dev *hdev = NULL;
 787	int len, err = 0;
 788
 789	BT_DBG("sock %p sk %p", sock, sk);
 790
 791	if (!addr)
 792		return -EINVAL;
 793
 794	memset(&haddr, 0, sizeof(haddr));
 795	len = min_t(unsigned int, sizeof(haddr), addr_len);
 796	memcpy(&haddr, addr, len);
 797
 798	if (haddr.hci_family != AF_BLUETOOTH)
 799		return -EINVAL;
 800
 801	lock_sock(sk);
 802
 803	if (sk->sk_state == BT_BOUND) {
 804		err = -EALREADY;
 805		goto done;
 806	}
 807
 808	switch (haddr.hci_channel) {
 809	case HCI_CHANNEL_RAW:
 810		if (hci_pi(sk)->hdev) {
 811			err = -EALREADY;
 812			goto done;
 813		}
 814
 815		if (haddr.hci_dev != HCI_DEV_NONE) {
 816			hdev = hci_dev_get(haddr.hci_dev);
 817			if (!hdev) {
 818				err = -ENODEV;
 819				goto done;
 820			}
 821
 822			atomic_inc(&hdev->promisc);
 823		}
 824
 825		hci_pi(sk)->hdev = hdev;
 826		break;
 827
 828	case HCI_CHANNEL_USER:
 829		if (hci_pi(sk)->hdev) {
 830			err = -EALREADY;
 831			goto done;
 832		}
 833
 834		if (haddr.hci_dev == HCI_DEV_NONE) {
 835			err = -EINVAL;
 836			goto done;
 837		}
 838
 839		if (!capable(CAP_NET_ADMIN)) {
 840			err = -EPERM;
 841			goto done;
 842		}
 843
 844		hdev = hci_dev_get(haddr.hci_dev);
 845		if (!hdev) {
 846			err = -ENODEV;
 847			goto done;
 848		}
 849
 850		if (test_bit(HCI_INIT, &hdev->flags) ||
 851		    hci_dev_test_flag(hdev, HCI_SETUP) ||
 852		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 853		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
 854		     test_bit(HCI_UP, &hdev->flags))) {
 855			err = -EBUSY;
 856			hci_dev_put(hdev);
 857			goto done;
 858		}
 859
 860		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
 861			err = -EUSERS;
 862			hci_dev_put(hdev);
 863			goto done;
 864		}
 865
 866		mgmt_index_removed(hdev);
 867
 868		err = hci_dev_open(hdev->id);
 869		if (err) {
 870			if (err == -EALREADY) {
 871				/* In case the transport is already up and
 872				 * running, clear the error here.
 873				 *
  874				 * This can happen when opening a user
 875				 * channel and HCI_AUTO_OFF grace period
 876				 * is still active.
 877				 */
 878				err = 0;
 879			} else {
 880				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 881				mgmt_index_added(hdev);
 882				hci_dev_put(hdev);
 883				goto done;
 884			}
 885		}
 886
 887		atomic_inc(&hdev->promisc);
 888
 889		hci_pi(sk)->hdev = hdev;
 890		break;
 891
 892	case HCI_CHANNEL_MONITOR:
 893		if (haddr.hci_dev != HCI_DEV_NONE) {
 894			err = -EINVAL;
 895			goto done;
 896		}
 897
 898		if (!capable(CAP_NET_RAW)) {
 899			err = -EPERM;
 900			goto done;
 901		}
 902
 903		/* The monitor interface is restricted to CAP_NET_RAW
 904		 * capabilities and with that implicitly trusted.
 905		 */
 906		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 907
 908		send_monitor_note(sk, "Linux version %s (%s)",
 909				  init_utsname()->release,
 910				  init_utsname()->machine);
 911		send_monitor_note(sk, "Bluetooth subsystem version %s",
 912				  BT_SUBSYS_VERSION);
 913		send_monitor_replay(sk);
 914
 915		atomic_inc(&monitor_promisc);
 916		break;
 917
 918	case HCI_CHANNEL_LOGGING:
 919		if (haddr.hci_dev != HCI_DEV_NONE) {
 920			err = -EINVAL;
 921			goto done;
 922		}
 923
 924		if (!capable(CAP_NET_ADMIN)) {
 925			err = -EPERM;
 926			goto done;
 927		}
 928		break;
 929
 930	default:
 931		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
 932			err = -EINVAL;
 933			goto done;
 934		}
 935
 936		if (haddr.hci_dev != HCI_DEV_NONE) {
 937			err = -EINVAL;
 938			goto done;
 939		}
 940
 941		/* Users with CAP_NET_ADMIN capabilities are allowed
 942		 * access to all management commands and events. For
 943		 * untrusted users the interface is restricted and
 944		 * also only untrusted events are sent.
 945		 */
 946		if (capable(CAP_NET_ADMIN))
 947			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 948
 949		/* At the moment the index and unconfigured index events
 950		 * are enabled unconditionally. Setting them on each
 951		 * socket when binding keeps this functionality. They
 952		 * however might be cleared later and then sending of these
 953		 * events will be disabled, but that is then intentional.
 954		 *
 955		 * This also enables generic events that are safe to be
  956		 * received by untrusted users. Examples of such events
 957		 * are changes to settings, class of device, name etc.
 958		 */
 959		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
 960			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
 961			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
 962			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
 963		}
 964		break;
 965	}
 966
 967
 968	hci_pi(sk)->channel = haddr.hci_channel;
 969	sk->sk_state = BT_BOUND;
 970
 971done:
 972	release_sock(sk);
 973	return err;
 974}
 975
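/* Userspace sketch (editorial): binding a raw socket to hci0, i.e. the
 * HCI_CHANNEL_RAW case handled above:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");		// e.g. ENODEV if hci0 is absent
 *
 * HCI_CHANNEL_USER is bound the same way but demands CAP_NET_ADMIN and
 * exclusive access to a device that is not already up.
 */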
 976static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 977			    int *addr_len, int peer)
 978{
 979	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
 980	struct sock *sk = sock->sk;
 981	struct hci_dev *hdev;
 982	int err = 0;
 983
 984	BT_DBG("sock %p sk %p", sock, sk);
 985
 986	if (peer)
 987		return -EOPNOTSUPP;
 988
 989	lock_sock(sk);
 990
 991	hdev = hci_pi(sk)->hdev;
 992	if (!hdev) {
 993		err = -EBADFD;
 994		goto done;
 995	}
 996
 997	*addr_len = sizeof(*haddr);
 998	haddr->hci_family = AF_BLUETOOTH;
 999	haddr->hci_dev    = hdev->id;
 1000	haddr->hci_channel = hci_pi(sk)->channel;
1001
1002done:
1003	release_sock(sk);
1004	return err;
1005}
1006
1007static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1008			  struct sk_buff *skb)
1009{
1010	__u32 mask = hci_pi(sk)->cmsg_mask;
1011
1012	if (mask & HCI_CMSG_DIR) {
1013		int incoming = bt_cb(skb)->incoming;
1014		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1015			 &incoming);
1016	}
1017
1018	if (mask & HCI_CMSG_TSTAMP) {
1019#ifdef CONFIG_COMPAT
1020		struct compat_timeval ctv;
1021#endif
1022		struct timeval tv;
1023		void *data;
1024		int len;
1025
1026		skb_get_timestamp(skb, &tv);
1027
1028		data = &tv;
1029		len = sizeof(tv);
1030#ifdef CONFIG_COMPAT
1031		if (!COMPAT_USE_64BIT_TIME &&
1032		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1033			ctv.tv_sec = tv.tv_sec;
1034			ctv.tv_usec = tv.tv_usec;
1035			data = &ctv;
1036			len = sizeof(ctv);
1037		}
1038#endif
1039
1040		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1041	}
1042}
1043
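/* Userspace sketch (editorial): once HCI_DATA_DIR is enabled with
 * setsockopt(), the direction flag produced above arrives as ancillary
 * data and is recovered from recvmsg() like this:
 *
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &msg, 0);	// msg.msg_control buffer prepared
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == SOL_HCI &&
 *		    c->cmsg_type == HCI_CMSG_DIR) {
 *			int incoming;
 *
 *			memcpy(&incoming, CMSG_DATA(c), sizeof(incoming));
 *		}
 *	}
 */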
1044static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1045			    size_t len, int flags)
1046{
1047	int noblock = flags & MSG_DONTWAIT;
1048	struct sock *sk = sock->sk;
1049	struct sk_buff *skb;
1050	int copied, err;
1051
1052	BT_DBG("sock %p, sk %p", sock, sk);
1053
1054	if (flags & MSG_OOB)
1055		return -EOPNOTSUPP;
1056
1057	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1058		return -EOPNOTSUPP;
1059
1060	if (sk->sk_state == BT_CLOSED)
1061		return 0;
1062
1063	skb = skb_recv_datagram(sk, flags, noblock, &err);
1064	if (!skb)
1065		return err;
1066
1067	copied = skb->len;
1068	if (len < copied) {
1069		msg->msg_flags |= MSG_TRUNC;
1070		copied = len;
1071	}
1072
1073	skb_reset_transport_header(skb);
1074	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1075
1076	switch (hci_pi(sk)->channel) {
1077	case HCI_CHANNEL_RAW:
1078		hci_sock_cmsg(sk, msg, skb);
1079		break;
1080	case HCI_CHANNEL_USER:
1081	case HCI_CHANNEL_MONITOR:
1082		sock_recv_timestamp(msg, sk, skb);
1083		break;
1084	default:
1085		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1086			sock_recv_timestamp(msg, sk, skb);
1087		break;
1088	}
1089
1090	skb_free_datagram(sk, skb);
1091
1092	return err ? : copied;
1093}
1094
1095static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1096			struct msghdr *msg, size_t msglen)
1097{
1098	void *buf;
1099	u8 *cp;
1100	struct mgmt_hdr *hdr;
1101	u16 opcode, index, len;
1102	struct hci_dev *hdev = NULL;
1103	const struct hci_mgmt_handler *handler;
1104	bool var_len, no_hdev;
1105	int err;
1106
1107	BT_DBG("got %zu bytes", msglen);
1108
1109	if (msglen < sizeof(*hdr))
1110		return -EINVAL;
1111
1112	buf = kmalloc(msglen, GFP_KERNEL);
1113	if (!buf)
1114		return -ENOMEM;
1115
1116	if (memcpy_from_msg(buf, msg, msglen)) {
1117		err = -EFAULT;
1118		goto done;
1119	}
1120
1121	hdr = buf;
1122	opcode = __le16_to_cpu(hdr->opcode);
1123	index = __le16_to_cpu(hdr->index);
1124	len = __le16_to_cpu(hdr->len);
1125
1126	if (len != msglen - sizeof(*hdr)) {
1127		err = -EINVAL;
1128		goto done;
1129	}
1130
1131	if (opcode >= chan->handler_count ||
1132	    chan->handlers[opcode].func == NULL) {
1133		BT_DBG("Unknown op %u", opcode);
1134		err = mgmt_cmd_status(sk, index, opcode,
1135				      MGMT_STATUS_UNKNOWN_COMMAND);
1136		goto done;
1137	}
1138
1139	handler = &chan->handlers[opcode];
1140
1141	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1142	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1143		err = mgmt_cmd_status(sk, index, opcode,
1144				      MGMT_STATUS_PERMISSION_DENIED);
1145		goto done;
1146	}
1147
1148	if (index != MGMT_INDEX_NONE) {
1149		hdev = hci_dev_get(index);
1150		if (!hdev) {
1151			err = mgmt_cmd_status(sk, index, opcode,
1152					      MGMT_STATUS_INVALID_INDEX);
1153			goto done;
1154		}
1155
1156		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1157		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1158		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1159			err = mgmt_cmd_status(sk, index, opcode,
1160					      MGMT_STATUS_INVALID_INDEX);
1161			goto done;
1162		}
1163
1164		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1165		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1166			err = mgmt_cmd_status(sk, index, opcode,
1167					      MGMT_STATUS_INVALID_INDEX);
1168			goto done;
1169		}
1170	}
1171
1172	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1173	if (no_hdev != !hdev) {
1174		err = mgmt_cmd_status(sk, index, opcode,
1175				      MGMT_STATUS_INVALID_INDEX);
1176		goto done;
1177	}
1178
1179	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1180	if ((var_len && len < handler->data_len) ||
1181	    (!var_len && len != handler->data_len)) {
1182		err = mgmt_cmd_status(sk, index, opcode,
1183				      MGMT_STATUS_INVALID_PARAMS);
1184		goto done;
1185	}
1186
1187	if (hdev && chan->hdev_init)
1188		chan->hdev_init(sk, hdev);
1189
1190	cp = buf + sizeof(*hdr);
1191
1192	err = handler->func(sk, hdev, cp, len);
1193	if (err < 0)
1194		goto done;
1195
1196	err = msglen;
1197
1198done:
1199	if (hdev)
1200		hci_dev_put(hdev);
1201
1202	kfree(buf);
1203	return err;
1204}
1205
1206static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1207{
1208	struct hci_mon_hdr *hdr;
1209	struct sk_buff *skb;
1210	struct hci_dev *hdev;
1211	u16 index;
1212	int err;
1213
1214	/* The logging frame consists at minimum of the standard header,
1215	 * the priority byte, the ident length byte and at least one string
 1216	 * terminator NUL byte. Anything shorter is an invalid packet.
1217	 */
1218	if (len < sizeof(*hdr) + 3)
1219		return -EINVAL;
1220
1221	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1222	if (!skb)
1223		return err;
1224
1225	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1226		err = -EFAULT;
1227		goto drop;
1228	}
1229
1230	hdr = (void *)skb->data;
1231
1232	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1233		err = -EINVAL;
1234		goto drop;
1235	}
1236
1237	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1238		__u8 priority = skb->data[sizeof(*hdr)];
1239		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1240
1241		/* Only the priorities 0-7 are valid and with that any other
1242		 * value results in an invalid packet.
1243		 *
1244		 * The priority byte is followed by an ident length byte and
1245		 * the NUL terminated ident string. Check that the ident
1246		 * length is not overflowing the packet and also that the
1247		 * ident string itself is NUL terminated. In case the ident
1248		 * length is zero, the length value actually doubles as NUL
1249		 * terminator identifier.
1250		 *
1251		 * The message follows the ident string (if present) and
1252		 * must be NUL terminated. Otherwise it is not a valid packet.
1253		 */
1254		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1255		    ident_len > len - sizeof(*hdr) - 3 ||
1256		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1257			err = -EINVAL;
1258			goto drop;
1259		}
1260	} else {
1261		err = -EINVAL;
1262		goto drop;
1263	}
1264
1265	index = __le16_to_cpu(hdr->index);
1266
1267	if (index != MGMT_INDEX_NONE) {
1268		hdev = hci_dev_get(index);
1269		if (!hdev) {
1270			err = -ENODEV;
1271			goto drop;
1272		}
1273	} else {
1274		hdev = NULL;
1275	}
1276
1277	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1278
1279	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1280	err = len;
1281
1282	if (hdev)
1283		hci_dev_put(hdev);
1284
1285drop:
1286	kfree_skb(skb);
1287	return err;
1288}
1289
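/* Editorial layout summary (derived from the checks above): a valid
 * frame written to an HCI_CHANNEL_LOGGING socket is
 *
 *	struct hci_mon_hdr hdr;	// opcode 0x0000, index, payload len
 *	__u8  priority;		// syslog-style, 0-7
 *	__u8  ident_len;	// strlen(ident) + 1, or 0 if absent
 *	char  ident[];		// NUL-terminated, e.g. "myapp"
 *	char  message[];	// NUL-terminated log text
 *
 * The kernel rewrites the opcode to HCI_MON_USER_LOGGING before
 * fanning the frame out to monitor sockets.
 */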
1290static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1291			    size_t len)
1292{
1293	struct sock *sk = sock->sk;
1294	struct hci_mgmt_chan *chan;
1295	struct hci_dev *hdev;
1296	struct sk_buff *skb;
1297	int err;
1298
1299	BT_DBG("sock %p sk %p", sock, sk);
1300
1301	if (msg->msg_flags & MSG_OOB)
1302		return -EOPNOTSUPP;
1303
1304	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1305		return -EINVAL;
1306
1307	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1308		return -EINVAL;
1309
1310	lock_sock(sk);
1311
1312	switch (hci_pi(sk)->channel) {
1313	case HCI_CHANNEL_RAW:
1314	case HCI_CHANNEL_USER:
1315		break;
1316	case HCI_CHANNEL_MONITOR:
1317		err = -EOPNOTSUPP;
1318		goto done;
1319	case HCI_CHANNEL_LOGGING:
1320		err = hci_logging_frame(sk, msg, len);
1321		goto done;
1322	default:
1323		mutex_lock(&mgmt_chan_list_lock);
1324		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1325		if (chan)
1326			err = hci_mgmt_cmd(chan, sk, msg, len);
1327		else
1328			err = -EINVAL;
1329
1330		mutex_unlock(&mgmt_chan_list_lock);
1331		goto done;
1332	}
1333
1334	hdev = hci_pi(sk)->hdev;
1335	if (!hdev) {
1336		err = -EBADFD;
1337		goto done;
1338	}
1339
1340	if (!test_bit(HCI_UP, &hdev->flags)) {
1341		err = -ENETDOWN;
1342		goto done;
1343	}
1344
1345	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1346	if (!skb)
1347		goto done;
1348
1349	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1350		err = -EFAULT;
1351		goto drop;
1352	}
1353
1354	hci_skb_pkt_type(skb) = skb->data[0];
1355	skb_pull(skb, 1);
1356
1357	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1358		/* No permission check is needed for user channel
1359		 * since that gets enforced when binding the socket.
1360		 *
1361		 * However check that the packet type is valid.
1362		 */
1363		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1364		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1365		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1366			err = -EINVAL;
1367			goto drop;
1368		}
1369
1370		skb_queue_tail(&hdev->raw_q, skb);
1371		queue_work(hdev->workqueue, &hdev->tx_work);
1372	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1373		u16 opcode = get_unaligned_le16(skb->data);
1374		u16 ogf = hci_opcode_ogf(opcode);
1375		u16 ocf = hci_opcode_ocf(opcode);
1376
1377		if (((ogf > HCI_SFLT_MAX_OGF) ||
1378		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1379				   &hci_sec_filter.ocf_mask[ogf])) &&
1380		    !capable(CAP_NET_RAW)) {
1381			err = -EPERM;
1382			goto drop;
1383		}
1384
1385		/* Since the opcode has already been extracted here, store
1386		 * a copy of the value for later use by the drivers.
1387		 */
1388		hci_skb_opcode(skb) = opcode;
1389
1390		if (ogf == 0x3f) {
1391			skb_queue_tail(&hdev->raw_q, skb);
1392			queue_work(hdev->workqueue, &hdev->tx_work);
1393		} else {
1394			/* Stand-alone HCI commands must be flagged as
1395			 * single-command requests.
1396			 */
1397			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1398
1399			skb_queue_tail(&hdev->cmd_q, skb);
1400			queue_work(hdev->workqueue, &hdev->cmd_work);
1401		}
1402	} else {
1403		if (!capable(CAP_NET_RAW)) {
1404			err = -EPERM;
1405			goto drop;
1406		}
1407
1408		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1409		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1410			err = -EINVAL;
1411			goto drop;
1412		}
1413
1414		skb_queue_tail(&hdev->raw_q, skb);
1415		queue_work(hdev->workqueue, &hdev->tx_work);
1416	}
1417
1418	err = len;
1419
1420done:
1421	release_sock(sk);
1422	return err;
1423
1424drop:
1425	kfree_skb(skb);
1426	goto done;
1427}
1428
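/* Userspace sketch (editorial): sending Read Local Version Information
 * (OGF 0x04, OCF 0x0001, opcode 0x1001) on a bound raw socket.  The
 * frame is the packet-type byte followed by the command wire format;
 * this opcode passes hci_sec_filter, so CAP_NET_RAW is not required:
 *
 *	unsigned char cmd[] = {
 *		HCI_COMMAND_PKT,	// 0x01
 *		0x01, 0x10,		// opcode 0x1001, little endian
 *		0x00			// parameter length
 *	};
 *
 *	write(fd, cmd, sizeof(cmd));
 *
 * The Command Complete event comes back through read()/recvmsg(),
 * subject to the socket's filter.
 */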
1429static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1430			       char __user *optval, unsigned int len)
1431{
1432	struct hci_ufilter uf = { .opcode = 0 };
1433	struct sock *sk = sock->sk;
1434	int err = 0, opt = 0;
1435
1436	BT_DBG("sk %p, opt %d", sk, optname);
1437
1438	lock_sock(sk);
1439
1440	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1441		err = -EBADFD;
1442		goto done;
1443	}
1444
1445	switch (optname) {
1446	case HCI_DATA_DIR:
1447		if (get_user(opt, (int __user *)optval)) {
1448			err = -EFAULT;
1449			break;
1450		}
1451
1452		if (opt)
1453			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1454		else
1455			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1456		break;
1457
1458	case HCI_TIME_STAMP:
1459		if (get_user(opt, (int __user *)optval)) {
1460			err = -EFAULT;
1461			break;
1462		}
1463
1464		if (opt)
1465			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1466		else
1467			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1468		break;
1469
1470	case HCI_FILTER:
1471		{
1472			struct hci_filter *f = &hci_pi(sk)->filter;
1473
1474			uf.type_mask = f->type_mask;
1475			uf.opcode    = f->opcode;
1476			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1477			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1478		}
1479
1480		len = min_t(unsigned int, len, sizeof(uf));
1481		if (copy_from_user(&uf, optval, len)) {
1482			err = -EFAULT;
1483			break;
1484		}
1485
1486		if (!capable(CAP_NET_RAW)) {
1487			uf.type_mask &= hci_sec_filter.type_mask;
1488			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1489			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1490		}
1491
1492		{
1493			struct hci_filter *f = &hci_pi(sk)->filter;
1494
1495			f->type_mask = uf.type_mask;
1496			f->opcode    = uf.opcode;
1497			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1498			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1499		}
1500		break;
1501
1502	default:
1503		err = -ENOPROTOOPT;
1504		break;
1505	}
1506
1507done:
1508	release_sock(sk);
1509	return err;
1510}
1511
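/* Userspace sketch (editorial; the hci_filter helpers are BlueZ's,
 * from <bluetooth/hci.h>): restricting a raw socket to the two command
 * result events, which is what the HCI_FILTER case above consumes:
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	hci_filter_set_event(EVT_CMD_STATUS, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW the requested masks are silently intersected
 * with hci_sec_filter, so a socket cannot widen its view this way.
 */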
1512static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1513			       char __user *optval, int __user *optlen)
1514{
1515	struct hci_ufilter uf;
1516	struct sock *sk = sock->sk;
1517	int len, opt, err = 0;
1518
1519	BT_DBG("sk %p, opt %d", sk, optname);
1520
1521	if (get_user(len, optlen))
1522		return -EFAULT;
1523
1524	lock_sock(sk);
1525
1526	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1527		err = -EBADFD;
1528		goto done;
1529	}
1530
1531	switch (optname) {
1532	case HCI_DATA_DIR:
1533		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1534			opt = 1;
1535		else
1536			opt = 0;
1537
1538		if (put_user(opt, optval))
1539			err = -EFAULT;
1540		break;
1541
1542	case HCI_TIME_STAMP:
1543		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1544			opt = 1;
1545		else
1546			opt = 0;
1547
1548		if (put_user(opt, optval))
1549			err = -EFAULT;
1550		break;
1551
1552	case HCI_FILTER:
1553		{
1554			struct hci_filter *f = &hci_pi(sk)->filter;
1555
1556			memset(&uf, 0, sizeof(uf));
1557			uf.type_mask = f->type_mask;
1558			uf.opcode    = f->opcode;
1559			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1560			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1561		}
1562
1563		len = min_t(unsigned int, len, sizeof(uf));
1564		if (copy_to_user(optval, &uf, len))
1565			err = -EFAULT;
1566		break;
1567
1568	default:
1569		err = -ENOPROTOOPT;
1570		break;
1571	}
1572
1573done:
1574	release_sock(sk);
1575	return err;
1576}
1577
1578static const struct proto_ops hci_sock_ops = {
1579	.family		= PF_BLUETOOTH,
1580	.owner		= THIS_MODULE,
1581	.release	= hci_sock_release,
1582	.bind		= hci_sock_bind,
1583	.getname	= hci_sock_getname,
1584	.sendmsg	= hci_sock_sendmsg,
1585	.recvmsg	= hci_sock_recvmsg,
1586	.ioctl		= hci_sock_ioctl,
1587	.poll		= datagram_poll,
1588	.listen		= sock_no_listen,
1589	.shutdown	= sock_no_shutdown,
1590	.setsockopt	= hci_sock_setsockopt,
1591	.getsockopt	= hci_sock_getsockopt,
1592	.connect	= sock_no_connect,
1593	.socketpair	= sock_no_socketpair,
1594	.accept		= sock_no_accept,
1595	.mmap		= sock_no_mmap
1596};
1597
1598static struct proto hci_sk_proto = {
1599	.name		= "HCI",
1600	.owner		= THIS_MODULE,
1601	.obj_size	= sizeof(struct hci_pinfo)
1602};
1603
1604static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1605			   int kern)
1606{
1607	struct sock *sk;
1608
1609	BT_DBG("sock %p", sock);
1610
1611	if (sock->type != SOCK_RAW)
1612		return -ESOCKTNOSUPPORT;
1613
1614	sock->ops = &hci_sock_ops;
1615
1616	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1617	if (!sk)
1618		return -ENOMEM;
1619
1620	sock_init_data(sock, sk);
1621
1622	sock_reset_flag(sk, SOCK_ZAPPED);
1623
1624	sk->sk_protocol = protocol;
1625
1626	sock->state = SS_UNCONNECTED;
1627	sk->sk_state = BT_OPEN;
1628
1629	bt_sock_link(&hci_sk_list, sk);
1630	return 0;
1631}
1632
1633static const struct net_proto_family hci_sock_family_ops = {
1634	.family	= PF_BLUETOOTH,
1635	.owner	= THIS_MODULE,
1636	.create	= hci_sock_create,
1637};
1638
1639int __init hci_sock_init(void)
1640{
1641	int err;
1642
1643	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1644
1645	err = proto_register(&hci_sk_proto, 0);
1646	if (err < 0)
1647		return err;
1648
1649	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1650	if (err < 0) {
1651		BT_ERR("HCI socket registration failed");
1652		goto error;
1653	}
1654
1655	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1656	if (err < 0) {
1657		BT_ERR("Failed to create HCI proc file");
1658		bt_sock_unregister(BTPROTO_HCI);
1659		goto error;
1660	}
1661
1662	BT_INFO("HCI socket layer initialized");
1663
1664	return 0;
1665
1666error:
1667	proto_unregister(&hci_sk_proto);
1668	return err;
1669}
1670
1671void hci_sock_cleanup(void)
1672{
1673	bt_procfs_cleanup(&init_net, "hci");
1674	bt_sock_unregister(BTPROTO_HCI);
1675	proto_unregister(&hci_sk_proto);
1676}
net/bluetooth/hci_sock.c (v5.9)
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26#include <linux/compat.h>
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u8              cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60};
  61
  62void hci_sock_set_flag(struct sock *sk, int nr)
  63{
  64	set_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67void hci_sock_clear_flag(struct sock *sk, int nr)
  68{
  69	clear_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72int hci_sock_test_flag(struct sock *sk, int nr)
  73{
  74	return test_bit(nr, &hci_pi(sk)->flags);
  75}
  76
  77unsigned short hci_sock_get_channel(struct sock *sk)
  78{
  79	return hci_pi(sk)->channel;
  80}
  81
  82u32 hci_sock_get_cookie(struct sock *sk)
  83{
  84	return hci_pi(sk)->cookie;
  85}
  86
  87static bool hci_sock_gen_cookie(struct sock *sk)
  88{
  89	int id = hci_pi(sk)->cookie;
  90
  91	if (!id) {
  92		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
  93		if (id < 0)
  94			id = 0xffffffff;
  95
  96		hci_pi(sk)->cookie = id;
  97		get_task_comm(hci_pi(sk)->comm, current);
  98		return true;
  99	}
 100
 101	return false;
 102}
 103
 104static void hci_sock_free_cookie(struct sock *sk)
 105{
 106	int id = hci_pi(sk)->cookie;
 107
 108	if (id) {
 109		hci_pi(sk)->cookie = 0xffffffff;
 110		ida_simple_remove(&sock_cookie_ida, id);
 111	}
 112}
 113
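/* Editorial note (not part of the file): the cookie is a per-socket id
 * drawn from sock_cookie_ida; it tags this socket's HCI_MON_CTRL_OPEN,
 * HCI_MON_CTRL_CLOSE and HCI_MON_CTRL_COMMAND records so a monitor
 * trace can attribute control traffic to individual clients.
 * 0xffffffff doubles as the out-of-ids fallback and as the value left
 * behind while the real id is returned to the IDA.
 */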
 114static inline int hci_test_bit(int nr, const void *addr)
 115{
 116	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 117}
 118
 119/* Security filter */
 120#define HCI_SFLT_MAX_OGF  5
 121
 122struct hci_sec_filter {
 123	__u32 type_mask;
 124	__u32 event_mask[2];
 125	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 126};
 127
 128static const struct hci_sec_filter hci_sec_filter = {
 129	/* Packet types */
 130	0x10,
 131	/* Events */
 132	{ 0x1000d9fe, 0x0000b00c },
 133	/* Commands */
 134	{
 135		{ 0x0 },
 136		/* OGF_LINK_CTL */
 137		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 138		/* OGF_LINK_POLICY */
 139		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 140		/* OGF_HOST_CTL */
 141		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 142		/* OGF_INFO_PARAM */
 143		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 144		/* OGF_STATUS_PARAM */
 145		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 146	}
 147};
 148
 149static struct bt_sock_list hci_sk_list = {
 150	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 151};
 152
 153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 154{
 155	struct hci_filter *flt;
 156	int flt_type, flt_event;
 157
 158	/* Apply filter */
 159	flt = &hci_pi(sk)->filter;
 160
 161	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 162
 163	if (!test_bit(flt_type, &flt->type_mask))
 164		return true;
 165
 166	/* Extra filter for event packets only */
 167	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 168		return false;
 169
 170	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 171
 172	if (!hci_test_bit(flt_event, &flt->event_mask))
 173		return true;
 174
 175	/* Check filter only when opcode is set */
 176	if (!flt->opcode)
 177		return false;
 178
 179	if (flt_event == HCI_EV_CMD_COMPLETE &&
 180	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 181		return true;
 182
 183	if (flt_event == HCI_EV_CMD_STATUS &&
 184	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 185		return true;
 186
 187	return false;
 188}
 189
 190/* Send frame to RAW socket */
 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 192{
 193	struct sock *sk;
 194	struct sk_buff *skb_copy = NULL;
 195
 196	BT_DBG("hdev %p len %d", hdev, skb->len);
 197
 198	read_lock(&hci_sk_list.lock);
 199
 200	sk_for_each(sk, &hci_sk_list.head) {
 201		struct sk_buff *nskb;
 202
 203		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 204			continue;
 205
 206		/* Don't send frame to the socket it came from */
 207		if (skb->sk == sk)
 208			continue;
 209
 210		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 211			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 212			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 213			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 214			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 215			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 216				continue;
 217			if (is_filtered_packet(sk, skb))
 218				continue;
 219		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 220			if (!bt_cb(skb)->incoming)
 221				continue;
 222			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 224			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 225			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 226				continue;
 227		} else {
 228			/* Don't send frame to other channel types */
 229			continue;
 230		}
 231
 232		if (!skb_copy) {
 233			/* Create a private copy with headroom */
 234			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 235			if (!skb_copy)
 236				continue;
 237
 238			/* Put type byte before the data */
 239			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 240		}
 241
 242		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 243		if (!nskb)
 244			continue;
 245
 246		if (sock_queue_rcv_skb(sk, nskb))
 247			kfree_skb(nskb);
 248	}
 249
 250	read_unlock(&hci_sk_list.lock);
 251
 252	kfree_skb(skb_copy);
 253}
 254
 255/* Send frame to sockets with specific channel */
 256static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 257				  int flag, struct sock *skip_sk)
 258{
 259	struct sock *sk;
 260
 261	BT_DBG("channel %u len %d", channel, skb->len);
 262
 263	sk_for_each(sk, &hci_sk_list.head) {
 264		struct sk_buff *nskb;
 265
 266		/* Ignore socket without the flag set */
 267		if (!hci_sock_test_flag(sk, flag))
 268			continue;
 269
 270		/* Skip the original socket */
 271		if (sk == skip_sk)
 272			continue;
 273
 274		if (sk->sk_state != BT_BOUND)
 275			continue;
 276
 277		if (hci_pi(sk)->channel != channel)
 278			continue;
 279
 280		nskb = skb_clone(skb, GFP_ATOMIC);
 281		if (!nskb)
 282			continue;
 283
 284		if (sock_queue_rcv_skb(sk, nskb))
 285			kfree_skb(nskb);
 286	}
 287
 288}
 289
 290void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 291			 int flag, struct sock *skip_sk)
 292{
 293	read_lock(&hci_sk_list.lock);
 294	__hci_send_to_channel(channel, skb, flag, skip_sk);
 295	read_unlock(&hci_sk_list.lock);
 296}
 297
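/* Editorial note (not part of the file): the split into
 * __hci_send_to_channel() exists so hci_send_monitor_ctrl_event()
 * below, which already walks hci_sk_list under read_lock(), can fan
 * frames out to the monitor channel without retaking the same lock.
 */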
 298/* Send frame to monitor socket */
 299void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 300{
 301	struct sk_buff *skb_copy = NULL;
 302	struct hci_mon_hdr *hdr;
 303	__le16 opcode;
 304
 305	if (!atomic_read(&monitor_promisc))
 306		return;
 307
 308	BT_DBG("hdev %p len %d", hdev, skb->len);
 309
 310	switch (hci_skb_pkt_type(skb)) {
 311	case HCI_COMMAND_PKT:
 312		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 313		break;
 314	case HCI_EVENT_PKT:
 315		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 316		break;
 317	case HCI_ACLDATA_PKT:
 318		if (bt_cb(skb)->incoming)
 319			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 320		else
 321			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 322		break;
 323	case HCI_SCODATA_PKT:
 324		if (bt_cb(skb)->incoming)
 325			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 326		else
 327			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 328		break;
 329	case HCI_ISODATA_PKT:
 330		if (bt_cb(skb)->incoming)
 331			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
 332		else
 333			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
 334		break;
 335	case HCI_DIAG_PKT:
 336		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 337		break;
 338	default:
 339		return;
 340	}
 341
 342	/* Create a private copy with headroom */
 343	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 344	if (!skb_copy)
 345		return;
 346
 347	/* Put header before the data */
 348	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 349	hdr->opcode = opcode;
 350	hdr->index = cpu_to_le16(hdev->id);
 351	hdr->len = cpu_to_le16(skb->len);
 352
 353	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 354			    HCI_SOCK_TRUSTED, NULL);
 355	kfree_skb(skb_copy);
 356}
 357
 358void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 359				 void *data, u16 data_len, ktime_t tstamp,
 360				 int flag, struct sock *skip_sk)
 361{
 362	struct sock *sk;
 363	__le16 index;
 364
 365	if (hdev)
 366		index = cpu_to_le16(hdev->id);
 367	else
 368		index = cpu_to_le16(MGMT_INDEX_NONE);
 369
 370	read_lock(&hci_sk_list.lock);
 371
 372	sk_for_each(sk, &hci_sk_list.head) {
 373		struct hci_mon_hdr *hdr;
 374		struct sk_buff *skb;
 375
 376		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 377			continue;
 378
 379		/* Ignore socket without the flag set */
 380		if (!hci_sock_test_flag(sk, flag))
 381			continue;
 382
 383		/* Skip the original socket */
 384		if (sk == skip_sk)
 385			continue;
 386
 387		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 388		if (!skb)
 389			continue;
 390
 391		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 392		put_unaligned_le16(event, skb_put(skb, 2));
 393
 394		if (data)
 395			skb_put_data(skb, data, data_len);
 396
 397		skb->tstamp = tstamp;
 398
 399		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 400		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 401		hdr->index = index;
 402		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 403
 404		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 405				      HCI_SOCK_TRUSTED, NULL);
 406		kfree_skb(skb);
 407	}
 408
 409	read_unlock(&hci_sk_list.lock);
 410}
 411
 412static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 413{
 414	struct hci_mon_hdr *hdr;
 415	struct hci_mon_new_index *ni;
 416	struct hci_mon_index_info *ii;
 417	struct sk_buff *skb;
 418	__le16 opcode;
 419
 420	switch (event) {
 421	case HCI_DEV_REG:
 422		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 423		if (!skb)
 424			return NULL;
 425
 426		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 427		ni->type = hdev->dev_type;
 428		ni->bus = hdev->bus;
 429		bacpy(&ni->bdaddr, &hdev->bdaddr);
 430		memcpy(ni->name, hdev->name, 8);
 431
 432		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 433		break;
 434
 435	case HCI_DEV_UNREG:
 436		skb = bt_skb_alloc(0, GFP_ATOMIC);
 437		if (!skb)
 438			return NULL;
 439
 440		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 441		break;
 442
 443	case HCI_DEV_SETUP:
 444		if (hdev->manufacturer == 0xffff)
 445			return NULL;
 446		fallthrough;
 447
 448	case HCI_DEV_UP:
 449		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 450		if (!skb)
 451			return NULL;
 452
 453		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 454		bacpy(&ii->bdaddr, &hdev->bdaddr);
 455		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 456
 457		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 458		break;
 459
 460	case HCI_DEV_OPEN:
 461		skb = bt_skb_alloc(0, GFP_ATOMIC);
 462		if (!skb)
 463			return NULL;
 464
 465		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 466		break;
 467
 468	case HCI_DEV_CLOSE:
 469		skb = bt_skb_alloc(0, GFP_ATOMIC);
 470		if (!skb)
 471			return NULL;
 472
 473		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 474		break;
 475
 476	default:
 477		return NULL;
 478	}
 479
 480	__net_timestamp(skb);
 481
 482	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 483	hdr->opcode = opcode;
 484	hdr->index = cpu_to_le16(hdev->id);
 485	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 486
 487	return skb;
 488}
 489
 490static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 491{
 492	struct hci_mon_hdr *hdr;
 493	struct sk_buff *skb;
 494	u16 format;
 495	u8 ver[3];
 496	u32 flags;
 497
 498	/* No message needed when cookie is not present */
 499	if (!hci_pi(sk)->cookie)
 500		return NULL;
 501
 502	switch (hci_pi(sk)->channel) {
 503	case HCI_CHANNEL_RAW:
 504		format = 0x0000;
 505		ver[0] = BT_SUBSYS_VERSION;
 506		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 507		break;
 508	case HCI_CHANNEL_USER:
 509		format = 0x0001;
 510		ver[0] = BT_SUBSYS_VERSION;
 511		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 512		break;
 513	case HCI_CHANNEL_CONTROL:
 514		format = 0x0002;
 515		mgmt_fill_version_info(ver);
 516		break;
 517	default:
 518		/* No message for unsupported format */
 519		return NULL;
 520	}
 521
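	/* Fixed part is 14 bytes: 4-byte cookie, 2-byte format,
	 * 3-byte version, 4-byte flags and 1-byte comm length,
	 * followed by the TASK_COMM_LEN name itself.
	 */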
  522	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 523	if (!skb)
 524		return NULL;
 525
 526	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 527
 528	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 529	put_unaligned_le16(format, skb_put(skb, 2));
 530	skb_put_data(skb, ver, sizeof(ver));
 531	put_unaligned_le32(flags, skb_put(skb, 4));
 532	skb_put_u8(skb, TASK_COMM_LEN);
 533	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
 534
 535	__net_timestamp(skb);
 536
 537	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 538	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 539	if (hci_pi(sk)->hdev)
 540		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 541	else
 542		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 543	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 544
 545	return skb;
 546}
 547
 548static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 549{
 550	struct hci_mon_hdr *hdr;
 551	struct sk_buff *skb;
 552
 553	/* No message needed when cookie is not present */
 554	if (!hci_pi(sk)->cookie)
 555		return NULL;
 556
 557	switch (hci_pi(sk)->channel) {
 558	case HCI_CHANNEL_RAW:
 559	case HCI_CHANNEL_USER:
 560	case HCI_CHANNEL_CONTROL:
 561		break;
 562	default:
 563		/* No message for unsupported format */
 564		return NULL;
 565	}
 566
 567	skb = bt_skb_alloc(4, GFP_ATOMIC);
 568	if (!skb)
 569		return NULL;
 570
 571	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 572
 573	__net_timestamp(skb);
 574
 575	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 576	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 577	if (hci_pi(sk)->hdev)
 578		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 579	else
 580		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 581	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 582
 583	return skb;
 584}
 585
 586static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 587						   u16 opcode, u16 len,
 588						   const void *buf)
 589{
 590	struct hci_mon_hdr *hdr;
 591	struct sk_buff *skb;
 592
 593	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 594	if (!skb)
 595		return NULL;
 596
 597	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 598	put_unaligned_le16(opcode, skb_put(skb, 2));
 599
 600	if (buf)
 601		skb_put_data(skb, buf, len);
 602
 603	__net_timestamp(skb);
 604
 605	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 606	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 607	hdr->index = cpu_to_le16(index);
 608	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 609
 610	return skb;
 611}
 612
 613static void __printf(2, 3)
 614send_monitor_note(struct sock *sk, const char *fmt, ...)
 615{
 616	size_t len;
 617	struct hci_mon_hdr *hdr;
 618	struct sk_buff *skb;
 619	va_list args;
 620
 621	va_start(args, fmt);
 622	len = vsnprintf(NULL, 0, fmt, args);
 623	va_end(args);
 624
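	/* vsnprintf() with a NULL buffer just computes the formatted
	 * length, so the note can be sized exactly, plus one byte for
	 * the NUL terminator.
	 */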
 625	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 626	if (!skb)
 627		return;
 628
 629	va_start(args, fmt);
 630	vsprintf(skb_put(skb, len), fmt, args);
  631	skb_put_u8(skb, 0);
 632	va_end(args);
 633
 634	__net_timestamp(skb);
 635
 636	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 637	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 638	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 639	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 640
 641	if (sock_queue_rcv_skb(sk, skb))
 642		kfree_skb(skb);
 643}
 644
 645static void send_monitor_replay(struct sock *sk)
 646{
 647	struct hci_dev *hdev;
 648
 649	read_lock(&hci_dev_list_lock);
 650
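	/* Replay, for each controller, the events a monitor would have
	 * seen live: NEW_INDEX first, then OPEN_INDEX if the device is
	 * running, then INDEX_INFO once it is up or has finished setup.
	 */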
 651	list_for_each_entry(hdev, &hci_dev_list, list) {
 652		struct sk_buff *skb;
 653
 654		skb = create_monitor_event(hdev, HCI_DEV_REG);
 655		if (!skb)
 656			continue;
 657
 658		if (sock_queue_rcv_skb(sk, skb))
 659			kfree_skb(skb);
 660
 661		if (!test_bit(HCI_RUNNING, &hdev->flags))
 662			continue;
 663
 664		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 665		if (!skb)
 666			continue;
 667
 668		if (sock_queue_rcv_skb(sk, skb))
 669			kfree_skb(skb);
 670
 671		if (test_bit(HCI_UP, &hdev->flags))
 672			skb = create_monitor_event(hdev, HCI_DEV_UP);
 673		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 674			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 675		else
 676			skb = NULL;
 677
 678		if (skb) {
 679			if (sock_queue_rcv_skb(sk, skb))
 680				kfree_skb(skb);
 681		}
 682	}
 683
 684	read_unlock(&hci_dev_list_lock);
 685}
 686
 687static void send_monitor_control_replay(struct sock *mon_sk)
 688{
 689	struct sock *sk;
 690
 691	read_lock(&hci_sk_list.lock);
 692
 693	sk_for_each(sk, &hci_sk_list.head) {
 694		struct sk_buff *skb;
 695
 696		skb = create_monitor_ctrl_open(sk);
 697		if (!skb)
 698			continue;
 699
 700		if (sock_queue_rcv_skb(mon_sk, skb))
 701			kfree_skb(skb);
 702	}
 703
 704	read_unlock(&hci_sk_list.lock);
 705}
 706
 707/* Generate internal stack event */
 708static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 709{
 710	struct hci_event_hdr *hdr;
 711	struct hci_ev_stack_internal *ev;
 712	struct sk_buff *skb;
 713
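	/* Fabricate an HCI_EV_STACK_INTERNAL event so that stack
	 * notifications travel to raw sockets through the same path
	 * as real HCI events.
	 */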
 714	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 715	if (!skb)
 716		return;
 717
 718	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 719	hdr->evt  = HCI_EV_STACK_INTERNAL;
 720	hdr->plen = sizeof(*ev) + dlen;
 721
 722	ev = skb_put(skb, sizeof(*ev) + dlen);
 723	ev->type = type;
 724	memcpy(ev->data, data, dlen);
 725
 726	bt_cb(skb)->incoming = 1;
 727	__net_timestamp(skb);
 728
 729	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 730	hci_send_to_sock(hdev, skb);
 731	kfree_skb(skb);
 732}
 733
 734void hci_sock_dev_event(struct hci_dev *hdev, int event)
 735{
 736	BT_DBG("hdev %s event %d", hdev->name, event);
 737
 738	if (atomic_read(&monitor_promisc)) {
 739		struct sk_buff *skb;
 740
 741		/* Send event to monitor */
 742		skb = create_monitor_event(hdev, event);
 743		if (skb) {
 744			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 745					    HCI_SOCK_TRUSTED, NULL);
 746			kfree_skb(skb);
 747		}
 748	}
 749
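	/* Only the classic device events (register, unregister, up and
	 * down) are forwarded to raw sockets; open, close and setup are
	 * of interest to the monitor only.
	 */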
 750	if (event <= HCI_DEV_DOWN) {
 751		struct hci_ev_si_device ev;
 752
 753		/* Send event to sockets */
 754		ev.event  = event;
 755		ev.dev_id = hdev->id;
 756		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 757	}
 758
 759	if (event == HCI_DEV_UNREG) {
 760		struct sock *sk;
 761
 762		/* Detach sockets from device */
 763		read_lock(&hci_sk_list.lock);
 764		sk_for_each(sk, &hci_sk_list.head) {
 765			bh_lock_sock_nested(sk);
 766			if (hci_pi(sk)->hdev == hdev) {
 767				hci_pi(sk)->hdev = NULL;
 768				sk->sk_err = EPIPE;
 769				sk->sk_state = BT_OPEN;
 770				sk->sk_state_change(sk);
 771
 772				hci_dev_put(hdev);
 773			}
 774			bh_unlock_sock(sk);
 775		}
 776		read_unlock(&hci_sk_list.lock);
 777	}
 778}
 779
 780static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 781{
 782	struct hci_mgmt_chan *c;
 783
 784	list_for_each_entry(c, &mgmt_chan_list, list) {
 785		if (c->channel == channel)
 786			return c;
 787	}
 788
 789	return NULL;
 790}
 791
 792static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 793{
 794	struct hci_mgmt_chan *c;
 795
 796	mutex_lock(&mgmt_chan_list_lock);
 797	c = __hci_mgmt_chan_find(channel);
 798	mutex_unlock(&mgmt_chan_list_lock);
 799
 800	return c;
 801}
 802
 803int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 804{
 805	if (c->channel < HCI_CHANNEL_CONTROL)
 806		return -EINVAL;
 807
 808	mutex_lock(&mgmt_chan_list_lock);
 809	if (__hci_mgmt_chan_find(c->channel)) {
 810		mutex_unlock(&mgmt_chan_list_lock);
 811		return -EALREADY;
 812	}
 813
 814	list_add_tail(&c->list, &mgmt_chan_list);
 815
 816	mutex_unlock(&mgmt_chan_list_lock);
 817
 818	return 0;
 819}
 820EXPORT_SYMBOL(hci_mgmt_chan_register);
 821
 822void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 823{
 824	mutex_lock(&mgmt_chan_list_lock);
 825	list_del(&c->list);
 826	mutex_unlock(&mgmt_chan_list_lock);
 827}
 828EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 829
 830static int hci_sock_release(struct socket *sock)
 831{
 832	struct sock *sk = sock->sk;
 833	struct hci_dev *hdev;
 834	struct sk_buff *skb;
 835
 836	BT_DBG("sock %p sk %p", sock, sk);
 837
 838	if (!sk)
 839		return 0;
 840
 841	lock_sock(sk);
 842
 843	switch (hci_pi(sk)->channel) {
 844	case HCI_CHANNEL_MONITOR:
 845		atomic_dec(&monitor_promisc);
 846		break;
 847	case HCI_CHANNEL_RAW:
 848	case HCI_CHANNEL_USER:
 849	case HCI_CHANNEL_CONTROL:
 850		/* Send event to monitor */
 851		skb = create_monitor_ctrl_close(sk);
 852		if (skb) {
 853			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 854					    HCI_SOCK_TRUSTED, NULL);
 855			kfree_skb(skb);
 856		}
 857
 858		hci_sock_free_cookie(sk);
 859		break;
 860	}
 861
 862	bt_sock_unlink(&hci_sk_list, sk);
 863
 864	hdev = hci_pi(sk)->hdev;
 865	if (hdev) {
 866		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 867			/* When releasing a user channel exclusive access,
 868			 * call hci_dev_do_close directly instead of calling
 869			 * hci_dev_close to ensure the exclusive access will
 870			 * be released and the controller brought back down.
 871			 *
 872			 * The checking of HCI_AUTO_OFF is not needed in this
 873			 * case since it will have been cleared already when
 874			 * opening the user channel.
 875			 */
 876			hci_dev_do_close(hdev);
 877			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 878			mgmt_index_added(hdev);
 879		}
 880
 881		atomic_dec(&hdev->promisc);
 882		hci_dev_put(hdev);
 883	}
 884
 885	sock_orphan(sk);
 886
 887	skb_queue_purge(&sk->sk_receive_queue);
 888	skb_queue_purge(&sk->sk_write_queue);
 889
 890	release_sock(sk);
 891	sock_put(sk);
 892	return 0;
 893}
 894
 895static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 896{
 897	bdaddr_t bdaddr;
 898	int err;
 899
 900	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 901		return -EFAULT;
 902
 903	hci_dev_lock(hdev);
 904
 905	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 906
 907	hci_dev_unlock(hdev);
 908
 909	return err;
 910}
 911
 912static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 913{
 914	bdaddr_t bdaddr;
 915	int err;
 916
 917	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 918		return -EFAULT;
 919
 920	hci_dev_lock(hdev);
 921
 922	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 923
 924	hci_dev_unlock(hdev);
 925
 926	return err;
 927}
 928
  929	/* Ioctls that require a bound socket */
 930static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 931				unsigned long arg)
 932{
 933	struct hci_dev *hdev = hci_pi(sk)->hdev;
 934
 935	if (!hdev)
 936		return -EBADFD;
 937
 938	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 939		return -EBUSY;
 940
 941	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 942		return -EOPNOTSUPP;
 943
 944	if (hdev->dev_type != HCI_PRIMARY)
 945		return -EOPNOTSUPP;
 946
 947	switch (cmd) {
 948	case HCISETRAW:
 949		if (!capable(CAP_NET_ADMIN))
 950			return -EPERM;
 951		return -EOPNOTSUPP;
 952
 953	case HCIGETCONNINFO:
 954		return hci_get_conn_info(hdev, (void __user *)arg);
 955
 956	case HCIGETAUTHINFO:
 957		return hci_get_auth_info(hdev, (void __user *)arg);
 958
 959	case HCIBLOCKADDR:
 960		if (!capable(CAP_NET_ADMIN))
 961			return -EPERM;
 962		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 963
 964	case HCIUNBLOCKADDR:
 965		if (!capable(CAP_NET_ADMIN))
 966			return -EPERM;
 967		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 968	}
 969
 970	return -ENOIOCTLCMD;
 971}
 972
 973static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 974			  unsigned long arg)
 975{
 976	void __user *argp = (void __user *)arg;
 977	struct sock *sk = sock->sk;
 978	int err;
 979
 980	BT_DBG("cmd %x arg %lx", cmd, arg);
 981
 982	lock_sock(sk);
 983
 984	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 985		err = -EBADFD;
 986		goto done;
 987	}
 988
  989	/* When calling an ioctl on an unbound raw socket, ensure that
  990	 * the monitor gets informed. The resulting event is only sent
  991	 * once, by checking whether the cookie already exists: the
  992	 * socket cookie is only ever generated once for the lifetime
  993	 * of a given socket.
  994	 */
 995	if (hci_sock_gen_cookie(sk)) {
 996		struct sk_buff *skb;
 997
 998		if (capable(CAP_NET_ADMIN))
 999			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1000
1001		/* Send event to monitor */
1002		skb = create_monitor_ctrl_open(sk);
1003		if (skb) {
1004			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1005					    HCI_SOCK_TRUSTED, NULL);
1006			kfree_skb(skb);
1007		}
1008	}
1009
1010	release_sock(sk);
1011
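	/* The commands below operate on the device list or a device
	 * index rather than on this socket, so they run without the
	 * socket lock held.
	 */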
1012	switch (cmd) {
1013	case HCIGETDEVLIST:
1014		return hci_get_dev_list(argp);
1015
1016	case HCIGETDEVINFO:
1017		return hci_get_dev_info(argp);
1018
1019	case HCIGETCONNLIST:
1020		return hci_get_conn_list(argp);
1021
1022	case HCIDEVUP:
1023		if (!capable(CAP_NET_ADMIN))
1024			return -EPERM;
1025		return hci_dev_open(arg);
1026
1027	case HCIDEVDOWN:
1028		if (!capable(CAP_NET_ADMIN))
1029			return -EPERM;
1030		return hci_dev_close(arg);
1031
1032	case HCIDEVRESET:
1033		if (!capable(CAP_NET_ADMIN))
1034			return -EPERM;
1035		return hci_dev_reset(arg);
1036
1037	case HCIDEVRESTAT:
1038		if (!capable(CAP_NET_ADMIN))
1039			return -EPERM;
1040		return hci_dev_reset_stat(arg);
1041
1042	case HCISETSCAN:
1043	case HCISETAUTH:
1044	case HCISETENCRYPT:
1045	case HCISETPTYPE:
1046	case HCISETLINKPOL:
1047	case HCISETLINKMODE:
1048	case HCISETACLMTU:
1049	case HCISETSCOMTU:
1050		if (!capable(CAP_NET_ADMIN))
1051			return -EPERM;
1052		return hci_dev_cmd(cmd, argp);
1053
1054	case HCIINQUIRY:
1055		return hci_inquiry(argp);
1056	}
1057
1058	lock_sock(sk);
1059
1060	err = hci_sock_bound_ioctl(sk, cmd, arg);
1061
1062done:
1063	release_sock(sk);
1064	return err;
1065}
1066
1067#ifdef CONFIG_COMPAT
1068static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1069				 unsigned long arg)
1070{
1071	switch (cmd) {
1072	case HCIDEVUP:
1073	case HCIDEVDOWN:
1074	case HCIDEVRESET:
1075	case HCIDEVRESTAT:
1076		return hci_sock_ioctl(sock, cmd, arg);
1077	}
1078
1079	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1080}
1081#endif
1082
1083static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1084			 int addr_len)
1085{
1086	struct sockaddr_hci haddr;
1087	struct sock *sk = sock->sk;
1088	struct hci_dev *hdev = NULL;
1089	struct sk_buff *skb;
1090	int len, err = 0;
1091
1092	BT_DBG("sock %p sk %p", sock, sk);
1093
1094	if (!addr)
1095		return -EINVAL;
1096
1097	memset(&haddr, 0, sizeof(haddr));
1098	len = min_t(unsigned int, sizeof(haddr), addr_len);
1099	memcpy(&haddr, addr, len);
1100
1101	if (haddr.hci_family != AF_BLUETOOTH)
1102		return -EINVAL;
1103
1104	lock_sock(sk);
1105
1106	if (sk->sk_state == BT_BOUND) {
1107		err = -EALREADY;
1108		goto done;
1109	}
1110
1111	switch (haddr.hci_channel) {
1112	case HCI_CHANNEL_RAW:
1113		if (hci_pi(sk)->hdev) {
1114			err = -EALREADY;
1115			goto done;
1116		}
1117
1118		if (haddr.hci_dev != HCI_DEV_NONE) {
1119			hdev = hci_dev_get(haddr.hci_dev);
1120			if (!hdev) {
1121				err = -ENODEV;
1122				goto done;
1123			}
1124
1125			atomic_inc(&hdev->promisc);
1126		}
1127
1128		hci_pi(sk)->channel = haddr.hci_channel;
1129
1130		if (!hci_sock_gen_cookie(sk)) {
 1131		/* In the case when a cookie has already been assigned,
 1132		 * an ioctl has already been issued against an unbound
 1133		 * socket, which triggered an open notification. Send a
 1134		 * close notification first to allow a clean state
 1135		 * transition to bound.
 1136		 */
1137			skb = create_monitor_ctrl_close(sk);
1138			if (skb) {
1139				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1140						    HCI_SOCK_TRUSTED, NULL);
1141				kfree_skb(skb);
1142			}
1143		}
1144
1145		if (capable(CAP_NET_ADMIN))
1146			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1147
1148		hci_pi(sk)->hdev = hdev;
1149
1150		/* Send event to monitor */
1151		skb = create_monitor_ctrl_open(sk);
1152		if (skb) {
1153			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1154					    HCI_SOCK_TRUSTED, NULL);
1155			kfree_skb(skb);
1156		}
1157		break;
1158
1159	case HCI_CHANNEL_USER:
1160		if (hci_pi(sk)->hdev) {
1161			err = -EALREADY;
1162			goto done;
1163		}
1164
1165		if (haddr.hci_dev == HCI_DEV_NONE) {
1166			err = -EINVAL;
1167			goto done;
1168		}
1169
1170		if (!capable(CAP_NET_ADMIN)) {
1171			err = -EPERM;
1172			goto done;
1173		}
1174
1175		hdev = hci_dev_get(haddr.hci_dev);
1176		if (!hdev) {
1177			err = -ENODEV;
1178			goto done;
1179		}
1180
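		/* Refuse exclusive access while the controller is being
		 * initialized, set up or configured, or when it is already
		 * up for normal use (an auto-off grace period alone does
		 * not count as being up).
		 */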
1181		if (test_bit(HCI_INIT, &hdev->flags) ||
1182		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1183		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1184		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1185		     test_bit(HCI_UP, &hdev->flags))) {
1186			err = -EBUSY;
1187			hci_dev_put(hdev);
1188			goto done;
1189		}
1190
1191		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1192			err = -EUSERS;
1193			hci_dev_put(hdev);
1194			goto done;
1195		}
1196
1197		mgmt_index_removed(hdev);
1198
1199		err = hci_dev_open(hdev->id);
1200		if (err) {
1201			if (err == -EALREADY) {
1202				/* In case the transport is already up and
1203				 * running, clear the error here.
1204				 *
1205				 * This can happen when opening a user
1206				 * channel and HCI_AUTO_OFF grace period
1207				 * is still active.
1208				 */
1209				err = 0;
1210			} else {
1211				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1212				mgmt_index_added(hdev);
1213				hci_dev_put(hdev);
1214				goto done;
1215			}
1216		}
1217
1218		hci_pi(sk)->channel = haddr.hci_channel;
1219
1220		if (!hci_sock_gen_cookie(sk)) {
1221			/* In the case when a cookie has already been assigned,
1222			 * this socket will transition from a raw socket into
1223			 * a user channel socket. For a clean transition, send
1224			 * the close notification first.
1225			 */
1226			skb = create_monitor_ctrl_close(sk);
1227			if (skb) {
1228				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1229						    HCI_SOCK_TRUSTED, NULL);
1230				kfree_skb(skb);
1231			}
1232		}
1233
 1234		/* The user channel is restricted to CAP_NET_ADMIN
 1235		 * capabilities and is therefore implicitly trusted.
 1236		 */
1237		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1238
1239		hci_pi(sk)->hdev = hdev;
1240
1241		/* Send event to monitor */
1242		skb = create_monitor_ctrl_open(sk);
1243		if (skb) {
1244			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1245					    HCI_SOCK_TRUSTED, NULL);
1246			kfree_skb(skb);
1247		}
1248
1249		atomic_inc(&hdev->promisc);
1250		break;
1251
1252	case HCI_CHANNEL_MONITOR:
1253		if (haddr.hci_dev != HCI_DEV_NONE) {
1254			err = -EINVAL;
1255			goto done;
1256		}
1257
1258		if (!capable(CAP_NET_RAW)) {
1259			err = -EPERM;
1260			goto done;
1261		}
1262
1263		hci_pi(sk)->channel = haddr.hci_channel;
1264
 1265		/* The monitor interface is restricted to CAP_NET_RAW
 1266		 * capabilities and is therefore implicitly trusted.
 1267		 */
1268		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1269
1270		send_monitor_note(sk, "Linux version %s (%s)",
1271				  init_utsname()->release,
1272				  init_utsname()->machine);
1273		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1274				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1275		send_monitor_replay(sk);
1276		send_monitor_control_replay(sk);
1277
1278		atomic_inc(&monitor_promisc);
1279		break;
1280
1281	case HCI_CHANNEL_LOGGING:
1282		if (haddr.hci_dev != HCI_DEV_NONE) {
1283			err = -EINVAL;
1284			goto done;
1285		}
1286
1287		if (!capable(CAP_NET_ADMIN)) {
1288			err = -EPERM;
1289			goto done;
1290		}
1291
1292		hci_pi(sk)->channel = haddr.hci_channel;
1293		break;
1294
1295	default:
1296		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1297			err = -EINVAL;
1298			goto done;
1299		}
1300
1301		if (haddr.hci_dev != HCI_DEV_NONE) {
1302			err = -EINVAL;
1303			goto done;
1304		}
1305
 1306		/* Users with CAP_NET_ADMIN capabilities are allowed
 1307		 * access to all management commands and events. For
 1308		 * untrusted users the interface is restricted, and
 1309		 * only untrusted events are sent.
 1310		 */
1311		if (capable(CAP_NET_ADMIN))
1312			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1313
1314		hci_pi(sk)->channel = haddr.hci_channel;
1315
 1316		/* At the moment the index and unconfigured index events
 1317		 * are enabled unconditionally. Setting them on each
 1318		 * socket when binding keeps this functionality. They
 1319		 * might however be cleared later, and sending of these
 1320		 * events will then be disabled, but that is intentional.
 1321		 *
 1322		 * This also enables generic events that are safe to be
 1323		 * received by untrusted users. Examples of such events
 1324		 * are changes to settings, class of device, name etc.
 1325		 */
1326		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1327			if (!hci_sock_gen_cookie(sk)) {
 1328				/* In the case when a cookie has already been
 1329				 * assigned, this socket will transition from
 1330				 * a raw socket into a control socket. To
 1331				 * allow for a clean transition, send the
 1332				 * close notification first.
 1333				 */
1334				skb = create_monitor_ctrl_close(sk);
1335				if (skb) {
1336					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1337							    HCI_SOCK_TRUSTED, NULL);
1338					kfree_skb(skb);
1339				}
1340			}
1341
1342			/* Send event to monitor */
1343			skb = create_monitor_ctrl_open(sk);
1344			if (skb) {
1345				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1346						    HCI_SOCK_TRUSTED, NULL);
1347				kfree_skb(skb);
1348			}
1349
1350			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1351			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1352			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1353			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1354			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1355			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1356		}
1357		break;
1358	}
 1359
 1360	sk->sk_state = BT_BOUND;
1361
1362done:
1363	release_sock(sk);
1364	return err;
1365}
1366
1367static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1368			    int peer)
1369{
1370	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1371	struct sock *sk = sock->sk;
1372	struct hci_dev *hdev;
1373	int err = 0;
1374
1375	BT_DBG("sock %p sk %p", sock, sk);
1376
1377	if (peer)
1378		return -EOPNOTSUPP;
1379
1380	lock_sock(sk);
1381
1382	hdev = hci_pi(sk)->hdev;
1383	if (!hdev) {
1384		err = -EBADFD;
1385		goto done;
1386	}
 1387
1388	haddr->hci_family = AF_BLUETOOTH;
1389	haddr->hci_dev    = hdev->id;
 1390	haddr->hci_channel = hci_pi(sk)->channel;
1391	err = sizeof(*haddr);
1392
1393done:
1394	release_sock(sk);
1395	return err;
1396}
1397
1398static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1399			  struct sk_buff *skb)
1400{
1401	__u8 mask = hci_pi(sk)->cmsg_mask;
1402
1403	if (mask & HCI_CMSG_DIR) {
1404		int incoming = bt_cb(skb)->incoming;
1405		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1406			 &incoming);
1407	}
1408
1409	if (mask & HCI_CMSG_TSTAMP) {
1410#ifdef CONFIG_COMPAT
1411		struct old_timeval32 ctv;
1412#endif
1413		struct __kernel_old_timeval tv;
1414		void *data;
1415		int len;
1416
1417		skb_get_timestamp(skb, &tv);
1418
1419		data = &tv;
1420		len = sizeof(tv);
1421#ifdef CONFIG_COMPAT
1422		if (!COMPAT_USE_64BIT_TIME &&
1423		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1424			ctv.tv_sec = tv.tv_sec;
1425			ctv.tv_usec = tv.tv_usec;
1426			data = &ctv;
1427			len = sizeof(ctv);
1428		}
1429#endif
1430
1431		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1432	}
1433}
1434
1435static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1436			    size_t len, int flags)
1437{
1438	int noblock = flags & MSG_DONTWAIT;
1439	struct sock *sk = sock->sk;
1440	struct sk_buff *skb;
1441	int copied, err;
1442	unsigned int skblen;
1443
1444	BT_DBG("sock %p, sk %p", sock, sk);
1445
1446	if (flags & MSG_OOB)
1447		return -EOPNOTSUPP;
1448
1449	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1450		return -EOPNOTSUPP;
1451
1452	if (sk->sk_state == BT_CLOSED)
1453		return 0;
1454
1455	skb = skb_recv_datagram(sk, flags, noblock, &err);
1456	if (!skb)
1457		return err;
1458
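	/* Remember the full length so that MSG_TRUNC can report it
	 * even when the caller's buffer was smaller.
	 */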
1459	skblen = skb->len;
1460	copied = skb->len;
1461	if (len < copied) {
1462		msg->msg_flags |= MSG_TRUNC;
1463		copied = len;
1464	}
1465
1466	skb_reset_transport_header(skb);
1467	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1468
1469	switch (hci_pi(sk)->channel) {
1470	case HCI_CHANNEL_RAW:
1471		hci_sock_cmsg(sk, msg, skb);
1472		break;
1473	case HCI_CHANNEL_USER:
1474	case HCI_CHANNEL_MONITOR:
1475		sock_recv_timestamp(msg, sk, skb);
1476		break;
1477	default:
1478		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1479			sock_recv_timestamp(msg, sk, skb);
1480		break;
1481	}
1482
1483	skb_free_datagram(sk, skb);
1484
1485	if (flags & MSG_TRUNC)
1486		copied = skblen;
1487
1488	return err ? : copied;
1489}
1490
1491static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1492			struct msghdr *msg, size_t msglen)
1493{
1494	void *buf;
1495	u8 *cp;
1496	struct mgmt_hdr *hdr;
1497	u16 opcode, index, len;
1498	struct hci_dev *hdev = NULL;
1499	const struct hci_mgmt_handler *handler;
1500	bool var_len, no_hdev;
1501	int err;
1502
1503	BT_DBG("got %zu bytes", msglen);
1504
1505	if (msglen < sizeof(*hdr))
1506		return -EINVAL;
1507
1508	buf = kmalloc(msglen, GFP_KERNEL);
1509	if (!buf)
1510		return -ENOMEM;
1511
1512	if (memcpy_from_msg(buf, msg, msglen)) {
1513		err = -EFAULT;
1514		goto done;
1515	}
1516
1517	hdr = buf;
1518	opcode = __le16_to_cpu(hdr->opcode);
1519	index = __le16_to_cpu(hdr->index);
1520	len = __le16_to_cpu(hdr->len);
1521
1522	if (len != msglen - sizeof(*hdr)) {
1523		err = -EINVAL;
1524		goto done;
1525	}
1526
1527	if (chan->channel == HCI_CHANNEL_CONTROL) {
1528		struct sk_buff *skb;
1529
1530		/* Send event to monitor */
1531		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1532						  buf + sizeof(*hdr));
1533		if (skb) {
1534			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1535					    HCI_SOCK_TRUSTED, NULL);
1536			kfree_skb(skb);
1537		}
1538	}
1539
1540	if (opcode >= chan->handler_count ||
1541	    chan->handlers[opcode].func == NULL) {
1542		BT_DBG("Unknown op %u", opcode);
1543		err = mgmt_cmd_status(sk, index, opcode,
1544				      MGMT_STATUS_UNKNOWN_COMMAND);
1545		goto done;
1546	}
1547
1548	handler = &chan->handlers[opcode];
1549
1550	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1551	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1552		err = mgmt_cmd_status(sk, index, opcode,
1553				      MGMT_STATUS_PERMISSION_DENIED);
1554		goto done;
1555	}
1556
1557	if (index != MGMT_INDEX_NONE) {
1558		hdev = hci_dev_get(index);
1559		if (!hdev) {
1560			err = mgmt_cmd_status(sk, index, opcode,
1561					      MGMT_STATUS_INVALID_INDEX);
1562			goto done;
1563		}
1564
1565		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1566		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1567		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1568			err = mgmt_cmd_status(sk, index, opcode,
1569					      MGMT_STATUS_INVALID_INDEX);
1570			goto done;
1571		}
1572
1573		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1574		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1575			err = mgmt_cmd_status(sk, index, opcode,
1576					      MGMT_STATUS_INVALID_INDEX);
1577			goto done;
1578		}
1579	}
1580
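	/* Unless the command works both with and without a device,
	 * enforce that the presence of an index matches what the
	 * handler expects.
	 */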
1581	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1582		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1583		if (no_hdev != !hdev) {
1584			err = mgmt_cmd_status(sk, index, opcode,
1585					      MGMT_STATUS_INVALID_INDEX);
1586			goto done;
1587		}
1588	}
1589
1590	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1591	if ((var_len && len < handler->data_len) ||
1592	    (!var_len && len != handler->data_len)) {
1593		err = mgmt_cmd_status(sk, index, opcode,
1594				      MGMT_STATUS_INVALID_PARAMS);
1595		goto done;
1596	}
1597
1598	if (hdev && chan->hdev_init)
1599		chan->hdev_init(sk, hdev);
1600
1601	cp = buf + sizeof(*hdr);
1602
1603	err = handler->func(sk, hdev, cp, len);
1604	if (err < 0)
1605		goto done;
1606
1607	err = msglen;
1608
1609done:
1610	if (hdev)
1611		hci_dev_put(hdev);
1612
1613	kfree(buf);
1614	return err;
1615}
1616
1617static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1618{
1619	struct hci_mon_hdr *hdr;
1620	struct sk_buff *skb;
1621	struct hci_dev *hdev;
1622	u16 index;
1623	int err;
1624
 1625	/* The logging frame consists at minimum of the standard header,
 1626	 * the priority byte, the ident length byte and at least one string
 1627	 * terminator NUL byte. Anything shorter is an invalid packet.
 1628	 */
1629	if (len < sizeof(*hdr) + 3)
1630		return -EINVAL;
1631
1632	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1633	if (!skb)
1634		return err;
1635
1636	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1637		err = -EFAULT;
1638		goto drop;
1639	}
1640
1641	hdr = (void *)skb->data;
1642
1643	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1644		err = -EINVAL;
1645		goto drop;
1646	}
1647
1648	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1649		__u8 priority = skb->data[sizeof(*hdr)];
1650		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1651
 1652		/* Only priorities 0-7 are valid; any other value results
 1653		 * in an invalid packet.
 1654		 *
 1655		 * The priority byte is followed by an ident length byte and
 1656		 * the NUL-terminated ident string. Check that the ident
 1657		 * length does not overflow the packet and also that the
 1658		 * ident string itself is NUL terminated. In case the ident
 1659		 * length is zero, the length value actually doubles as the
 1660		 * NUL terminator of the identifier.
 1661		 *
 1662		 * The message follows the ident string (if present) and
 1663		 * must be NUL terminated. Otherwise it is not a valid packet.
 1664		 */
1665		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1666		    ident_len > len - sizeof(*hdr) - 3 ||
1667		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1668			err = -EINVAL;
1669			goto drop;
1670		}
1671	} else {
1672		err = -EINVAL;
1673		goto drop;
1674	}
1675
1676	index = __le16_to_cpu(hdr->index);
1677
1678	if (index != MGMT_INDEX_NONE) {
1679		hdev = hci_dev_get(index);
1680		if (!hdev) {
1681			err = -ENODEV;
1682			goto drop;
1683		}
1684	} else {
1685		hdev = NULL;
1686	}
1687
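	/* Rewrite the opcode so the frame appears on the monitor
	 * channel as a user logging entry.
	 */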
1688	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1689
1690	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1691	err = len;
1692
1693	if (hdev)
1694		hci_dev_put(hdev);
1695
1696drop:
1697	kfree_skb(skb);
1698	return err;
1699}
1700
1701static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1702			    size_t len)
1703{
1704	struct sock *sk = sock->sk;
1705	struct hci_mgmt_chan *chan;
1706	struct hci_dev *hdev;
1707	struct sk_buff *skb;
1708	int err;
1709
1710	BT_DBG("sock %p sk %p", sock, sk);
1711
1712	if (msg->msg_flags & MSG_OOB)
1713		return -EOPNOTSUPP;
1714
1715	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1716			       MSG_CMSG_COMPAT))
1717		return -EINVAL;
1718
1719	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1720		return -EINVAL;
1721
1722	lock_sock(sk);
1723
1724	switch (hci_pi(sk)->channel) {
1725	case HCI_CHANNEL_RAW:
1726	case HCI_CHANNEL_USER:
1727		break;
1728	case HCI_CHANNEL_MONITOR:
1729		err = -EOPNOTSUPP;
1730		goto done;
1731	case HCI_CHANNEL_LOGGING:
1732		err = hci_logging_frame(sk, msg, len);
1733		goto done;
1734	default:
1735		mutex_lock(&mgmt_chan_list_lock);
1736		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1737		if (chan)
1738			err = hci_mgmt_cmd(chan, sk, msg, len);
1739		else
1740			err = -EINVAL;
1741
1742		mutex_unlock(&mgmt_chan_list_lock);
1743		goto done;
1744	}
1745
1746	hdev = hci_pi(sk)->hdev;
1747	if (!hdev) {
1748		err = -EBADFD;
1749		goto done;
1750	}
1751
1752	if (!test_bit(HCI_UP, &hdev->flags)) {
1753		err = -ENETDOWN;
1754		goto done;
1755	}
1756
1757	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1758	if (!skb)
1759		goto done;
1760
1761	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1762		err = -EFAULT;
1763		goto drop;
1764	}
1765
1766	hci_skb_pkt_type(skb) = skb->data[0];
1767	skb_pull(skb, 1);
1768
1769	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1770		/* No permission check is needed for user channel
1771		 * since that gets enforced when binding the socket.
1772		 *
1773		 * However check that the packet type is valid.
1774		 */
1775		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1776		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1777		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1778		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1779			err = -EINVAL;
1780			goto drop;
1781		}
1782
1783		skb_queue_tail(&hdev->raw_q, skb);
1784		queue_work(hdev->workqueue, &hdev->tx_work);
1785	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1786		u16 opcode = get_unaligned_le16(skb->data);
1787		u16 ogf = hci_opcode_ogf(opcode);
1788		u16 ocf = hci_opcode_ocf(opcode);
1789
1790		if (((ogf > HCI_SFLT_MAX_OGF) ||
1791		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1792				   &hci_sec_filter.ocf_mask[ogf])) &&
1793		    !capable(CAP_NET_RAW)) {
1794			err = -EPERM;
1795			goto drop;
1796		}
1797
1798		/* Since the opcode has already been extracted here, store
1799		 * a copy of the value for later use by the drivers.
1800		 */
1801		hci_skb_opcode(skb) = opcode;
1802
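		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and its serialization and go out via the raw
		 * queue instead.
		 */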
1803		if (ogf == 0x3f) {
1804			skb_queue_tail(&hdev->raw_q, skb);
1805			queue_work(hdev->workqueue, &hdev->tx_work);
1806		} else {
1807			/* Stand-alone HCI commands must be flagged as
1808			 * single-command requests.
1809			 */
1810			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1811
1812			skb_queue_tail(&hdev->cmd_q, skb);
1813			queue_work(hdev->workqueue, &hdev->cmd_work);
1814		}
1815	} else {
1816		if (!capable(CAP_NET_RAW)) {
1817			err = -EPERM;
1818			goto drop;
1819		}
1820
1821		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1822		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1823		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1824			err = -EINVAL;
1825			goto drop;
1826		}
1827
1828		skb_queue_tail(&hdev->raw_q, skb);
1829		queue_work(hdev->workqueue, &hdev->tx_work);
1830	}
1831
1832	err = len;
1833
1834done:
1835	release_sock(sk);
1836	return err;
1837
1838drop:
1839	kfree_skb(skb);
1840	goto done;
1841}
1842
1843static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1844			       sockptr_t optval, unsigned int len)
1845{
1846	struct hci_ufilter uf = { .opcode = 0 };
1847	struct sock *sk = sock->sk;
1848	int err = 0, opt = 0;
1849
1850	BT_DBG("sk %p, opt %d", sk, optname);
1851
1852	if (level != SOL_HCI)
1853		return -ENOPROTOOPT;
1854
1855	lock_sock(sk);
1856
1857	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1858		err = -EBADFD;
1859		goto done;
1860	}
1861
1862	switch (optname) {
1863	case HCI_DATA_DIR:
1864		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1865			err = -EFAULT;
1866			break;
1867		}
1868
1869		if (opt)
1870			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1871		else
1872			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1873		break;
1874
1875	case HCI_TIME_STAMP:
1876		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1877			err = -EFAULT;
1878			break;
1879		}
1880
1881		if (opt)
1882			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1883		else
1884			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1885		break;
1886
1887	case HCI_FILTER:
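		/* Seed the user filter with the current settings so that
		 * a short copy from userspace leaves the remaining fields
		 * unchanged; unprivileged callers are then clamped to the
		 * security filter.
		 */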
1888		{
1889			struct hci_filter *f = &hci_pi(sk)->filter;
1890
1891			uf.type_mask = f->type_mask;
1892			uf.opcode    = f->opcode;
1893			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1894			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1895		}
1896
1897		len = min_t(unsigned int, len, sizeof(uf));
1898		if (copy_from_sockptr(&uf, optval, len)) {
1899			err = -EFAULT;
1900			break;
1901		}
1902
1903		if (!capable(CAP_NET_RAW)) {
1904			uf.type_mask &= hci_sec_filter.type_mask;
1905			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1906			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1907		}
1908
1909		{
1910			struct hci_filter *f = &hci_pi(sk)->filter;
1911
1912			f->type_mask = uf.type_mask;
1913			f->opcode    = uf.opcode;
1914			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1915			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1916		}
1917		break;
1918
1919	default:
1920		err = -ENOPROTOOPT;
1921		break;
1922	}
1923
1924done:
1925	release_sock(sk);
1926	return err;
1927}
1928
1929static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1930			       char __user *optval, int __user *optlen)
1931{
1932	struct hci_ufilter uf;
1933	struct sock *sk = sock->sk;
1934	int len, opt, err = 0;
1935
1936	BT_DBG("sk %p, opt %d", sk, optname);
1937
1938	if (level != SOL_HCI)
1939		return -ENOPROTOOPT;
1940
1941	if (get_user(len, optlen))
1942		return -EFAULT;
1943
1944	lock_sock(sk);
1945
1946	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1947		err = -EBADFD;
1948		goto done;
1949	}
1950
1951	switch (optname) {
1952	case HCI_DATA_DIR:
1953		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1954			opt = 1;
1955		else
1956			opt = 0;
1957
1958		if (put_user(opt, optval))
1959			err = -EFAULT;
1960		break;
1961
1962	case HCI_TIME_STAMP:
1963		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1964			opt = 1;
1965		else
1966			opt = 0;
1967
1968		if (put_user(opt, optval))
1969			err = -EFAULT;
1970		break;
1971
1972	case HCI_FILTER:
1973		{
1974			struct hci_filter *f = &hci_pi(sk)->filter;
1975
1976			memset(&uf, 0, sizeof(uf));
1977			uf.type_mask = f->type_mask;
1978			uf.opcode    = f->opcode;
1979			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1980			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1981		}
1982
1983		len = min_t(unsigned int, len, sizeof(uf));
1984		if (copy_to_user(optval, &uf, len))
1985			err = -EFAULT;
1986		break;
1987
1988	default:
1989		err = -ENOPROTOOPT;
1990		break;
1991	}
1992
1993done:
1994	release_sock(sk);
1995	return err;
1996}
1997
1998static const struct proto_ops hci_sock_ops = {
1999	.family		= PF_BLUETOOTH,
2000	.owner		= THIS_MODULE,
2001	.release	= hci_sock_release,
2002	.bind		= hci_sock_bind,
2003	.getname	= hci_sock_getname,
2004	.sendmsg	= hci_sock_sendmsg,
2005	.recvmsg	= hci_sock_recvmsg,
2006	.ioctl		= hci_sock_ioctl,
2007#ifdef CONFIG_COMPAT
2008	.compat_ioctl	= hci_sock_compat_ioctl,
2009#endif
2010	.poll		= datagram_poll,
2011	.listen		= sock_no_listen,
2012	.shutdown	= sock_no_shutdown,
2013	.setsockopt	= hci_sock_setsockopt,
2014	.getsockopt	= hci_sock_getsockopt,
2015	.connect	= sock_no_connect,
2016	.socketpair	= sock_no_socketpair,
2017	.accept		= sock_no_accept,
2018	.mmap		= sock_no_mmap
2019};
2020
2021static struct proto hci_sk_proto = {
2022	.name		= "HCI",
2023	.owner		= THIS_MODULE,
2024	.obj_size	= sizeof(struct hci_pinfo)
2025};
2026
2027static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2028			   int kern)
2029{
2030	struct sock *sk;
2031
2032	BT_DBG("sock %p", sock);
2033
2034	if (sock->type != SOCK_RAW)
2035		return -ESOCKTNOSUPPORT;
2036
2037	sock->ops = &hci_sock_ops;
2038
2039	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2040	if (!sk)
2041		return -ENOMEM;
2042
2043	sock_init_data(sock, sk);
2044
2045	sock_reset_flag(sk, SOCK_ZAPPED);
2046
2047	sk->sk_protocol = protocol;
2048
2049	sock->state = SS_UNCONNECTED;
2050	sk->sk_state = BT_OPEN;
2051
2052	bt_sock_link(&hci_sk_list, sk);
2053	return 0;
2054}
2055
2056static const struct net_proto_family hci_sock_family_ops = {
2057	.family	= PF_BLUETOOTH,
2058	.owner	= THIS_MODULE,
2059	.create	= hci_sock_create,
2060};
2061
2062int __init hci_sock_init(void)
2063{
2064	int err;
2065
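	/* sockaddr_hci must fit into the generic sockaddr that the
	 * socket syscalls copy around.
	 */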
2066	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2067
2068	err = proto_register(&hci_sk_proto, 0);
2069	if (err < 0)
2070		return err;
2071
2072	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2073	if (err < 0) {
2074		BT_ERR("HCI socket registration failed");
2075		goto error;
2076	}
2077
2078	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2079	if (err < 0) {
2080		BT_ERR("Failed to create HCI proc file");
2081		bt_sock_unregister(BTPROTO_HCI);
2082		goto error;
2083	}
2084
2085	BT_INFO("HCI socket layer initialized");
2086
2087	return 0;
2088
2089error:
2090	proto_unregister(&hci_sk_proto);
2091	return err;
2092}
2093
2094void hci_sock_cleanup(void)
2095{
2096	bt_procfs_cleanup(&init_net, "hci");
2097	bt_sock_unregister(BTPROTO_HCI);
2098	proto_unregister(&hci_sk_proto);
2099}