v4.6
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <asm/unaligned.h>
  30
  31#include <net/bluetooth/bluetooth.h>
  32#include <net/bluetooth/hci_core.h>
  33#include <net/bluetooth/hci_mon.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "mgmt_util.h"
  37
  38static LIST_HEAD(mgmt_chan_list);
  39static DEFINE_MUTEX(mgmt_chan_list_lock);
  40
  41static atomic_t monitor_promisc = ATOMIC_INIT(0);
  42
  43/* ----- HCI socket interface ----- */
  44
  45/* Socket info */
  46#define hci_pi(sk) ((struct hci_pinfo *) sk)
  47
  48struct hci_pinfo {
  49	struct bt_sock    bt;
  50	struct hci_dev    *hdev;
  51	struct hci_filter filter;
  52	__u32             cmsg_mask;
  53	unsigned short    channel;
  54	unsigned long     flags;
  55};
  56
  57void hci_sock_set_flag(struct sock *sk, int nr)
  58{
  59	set_bit(nr, &hci_pi(sk)->flags);
  60}
  61
  62void hci_sock_clear_flag(struct sock *sk, int nr)
  63{
  64	clear_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67int hci_sock_test_flag(struct sock *sk, int nr)
  68{
  69	return test_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72unsigned short hci_sock_get_channel(struct sock *sk)
  73{
  74	return hci_pi(sk)->channel;
  75}
  76
  77static inline int hci_test_bit(int nr, const void *addr)
  78{
  79	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
  80}
  81
  82/* Security filter */
  83#define HCI_SFLT_MAX_OGF  5
  84
  85struct hci_sec_filter {
  86	__u32 type_mask;
  87	__u32 event_mask[2];
  88	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
  89};
  90
  91static const struct hci_sec_filter hci_sec_filter = {
  92	/* Packet types */
  93	0x10,
  94	/* Events */
  95	{ 0x1000d9fe, 0x0000b00c },
  96	/* Commands */
  97	{
  98		{ 0x0 },
  99		/* OGF_LINK_CTL */
 100		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 101		/* OGF_LINK_POLICY */
 102		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 103		/* OGF_HOST_CTL */
 104		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 105		/* OGF_INFO_PARAM */
 106		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 107		/* OGF_STATUS_PARAM */
 108		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 109	}
 110};
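The table above is consulted for every command sent by an unprivileged raw socket: the command's OGF selects a row of ocf_mask[], and that row is a 128-bit bitmap indexed by OCF. A minimal sketch of the lookup, mirroring the check done in hci_sock_sendmsg() further down (the helper name sflt_allows_opcode is hypothetical):

	static bool sflt_allows_opcode(__u16 opcode)
	{
		__u16 ogf = hci_opcode_ogf(opcode);	/* upper 6 bits */
		__u16 ocf = hci_opcode_ocf(opcode);	/* lower 10 bits */

		if (ogf > HCI_SFLT_MAX_OGF)
			return false;

		/* ocf_mask[ogf] is a 4 x 32-bit bitmap indexed by OCF */
		return hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				    &hci_sec_filter.ocf_mask[ogf]);
	}
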
 111
 112static struct bt_sock_list hci_sk_list = {
 113	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 114};
 115
 116static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 117{
 118	struct hci_filter *flt;
 119	int flt_type, flt_event;
 120
 121	/* Apply filter */
 122	flt = &hci_pi(sk)->filter;
 123
 124	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 125
 126	if (!test_bit(flt_type, &flt->type_mask))
 127		return true;
 128
 129	/* Extra filter for event packets only */
 130	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 131		return false;
 132
 133	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 134
 135	if (!hci_test_bit(flt_event, &flt->event_mask))
 136		return true;
 137
 138	/* Check filter only when opcode is set */
 139	if (!flt->opcode)
 140		return false;
 141
 142	if (flt_event == HCI_EV_CMD_COMPLETE &&
 143	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 144		return true;
 145
 146	if (flt_event == HCI_EV_CMD_STATUS &&
 147	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 148		return true;
 149
 150	return false;
 151}
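Userspace installs this per-socket filter with setsockopt(); the wire layout matches struct hci_ufilter, which hci_sock_setsockopt() below copies into the socket's filter. A hedged sketch, assuming fd is a bound AF_BLUETOOTH raw socket and the BlueZ userspace header constants:

	struct hci_ufilter uf = { 0 };

	uf.type_mask     = 1 << HCI_EVENT_PKT;		/* events only */
	uf.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;	/* one event */

	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf)) < 0)
		perror("setsockopt(HCI_FILTER)");
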
 152
 153/* Send frame to RAW socket */
 154void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 155{
 156	struct sock *sk;
 157	struct sk_buff *skb_copy = NULL;
 158
 159	BT_DBG("hdev %p len %d", hdev, skb->len);
 160
 161	read_lock(&hci_sk_list.lock);
 162
 163	sk_for_each(sk, &hci_sk_list.head) {
 164		struct sk_buff *nskb;
 165
 166		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 167			continue;
 168
 169		/* Don't send frame to the socket it came from */
 170		if (skb->sk == sk)
 171			continue;
 172
 173		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 174			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 175			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 176			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 177			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 178				continue;
 179			if (is_filtered_packet(sk, skb))
 180				continue;
 181		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 182			if (!bt_cb(skb)->incoming)
 183				continue;
 184			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 185			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 186			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 187				continue;
 188		} else {
 189			/* Don't send frame to other channel types */
 190			continue;
 191		}
 192
 193		if (!skb_copy) {
 194			/* Create a private copy with headroom */
 195			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 196			if (!skb_copy)
 197				continue;
 198
 199			/* Put type byte before the data */
 200			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 201		}
 202
 203		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 204		if (!nskb)
 205			continue;
 206
 207		if (sock_queue_rcv_skb(sk, nskb))
 208			kfree_skb(nskb);
 209	}
 210
 211	read_unlock(&hci_sk_list.lock);
 212
 213	kfree_skb(skb_copy);
 214}
 215
 216/* Send frame to sockets with specific channel */
 217void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 218			 int flag, struct sock *skip_sk)
 219{
 220	struct sock *sk;
 221
 222	BT_DBG("channel %u len %d", channel, skb->len);
 223
 224	read_lock(&hci_sk_list.lock);
 225
 226	sk_for_each(sk, &hci_sk_list.head) {
 227		struct sk_buff *nskb;
 228
 229		/* Ignore socket without the flag set */
 230		if (!hci_sock_test_flag(sk, flag))
 231			continue;
 232
 233		/* Skip the original socket */
 234		if (sk == skip_sk)
 235			continue;
 236
 237		if (sk->sk_state != BT_BOUND)
 238			continue;
 239
 240		if (hci_pi(sk)->channel != channel)
 241			continue;
 242
 243		nskb = skb_clone(skb, GFP_ATOMIC);
 244		if (!nskb)
 245			continue;
 246
 247		if (sock_queue_rcv_skb(sk, nskb))
 248			kfree_skb(nskb);
 249	}
 250
 251	read_unlock(&hci_sk_list.lock);
 252}
 253
 254/* Send frame to monitor socket */
 255void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 256{
 257	struct sk_buff *skb_copy = NULL;
 258	struct hci_mon_hdr *hdr;
 259	__le16 opcode;
 260
 261	if (!atomic_read(&monitor_promisc))
 262		return;
 263
 264	BT_DBG("hdev %p len %d", hdev, skb->len);
 265
 266	switch (hci_skb_pkt_type(skb)) {
 267	case HCI_COMMAND_PKT:
 268		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 269		break;
 270	case HCI_EVENT_PKT:
 271		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 272		break;
 273	case HCI_ACLDATA_PKT:
 274		if (bt_cb(skb)->incoming)
 275			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 276		else
 277			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 278		break;
 279	case HCI_SCODATA_PKT:
 280		if (bt_cb(skb)->incoming)
 281			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 282		else
 283			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 284		break;
 285	case HCI_DIAG_PKT:
 286		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 287		break;
 288	default:
 289		return;
 290	}
 291
 292	/* Create a private copy with headroom */
 293	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 294	if (!skb_copy)
 295		return;
 296
 297	/* Put header before the data */
 298	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 299	hdr->opcode = opcode;
 300	hdr->index = cpu_to_le16(hdev->id);
 301	hdr->len = cpu_to_le16(skb->len);
 302
 303	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 304			    HCI_SOCK_TRUSTED, NULL);
 305	kfree_skb(skb_copy);
 306}
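On the receiving end, a btmon-style reader binds to the monitor channel and gets every frame prefixed with the struct hci_mon_hdr built above. A minimal userspace sketch (CAP_NET_RAW is required, as enforced in hci_sock_bind() below):

	struct sockaddr_hci addr = {
		.hci_family  = AF_BLUETOOTH,
		.hci_dev     = HCI_DEV_NONE,
		.hci_channel = HCI_CHANNEL_MONITOR,
	};
	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("monitor bind");		/* EPERM without CAP_NET_RAW */
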
 307
 308static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 309{
 310	struct hci_mon_hdr *hdr;
 311	struct hci_mon_new_index *ni;
 312	struct hci_mon_index_info *ii;
 313	struct sk_buff *skb;
 314	__le16 opcode;
 315
 316	switch (event) {
 317	case HCI_DEV_REG:
 318		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 319		if (!skb)
 320			return NULL;
 321
 322		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 323		ni->type = hdev->dev_type;
 324		ni->bus = hdev->bus;
 325		bacpy(&ni->bdaddr, &hdev->bdaddr);
 326		memcpy(ni->name, hdev->name, 8);
 327
 328		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 329		break;
 330
 331	case HCI_DEV_UNREG:
 332		skb = bt_skb_alloc(0, GFP_ATOMIC);
 333		if (!skb)
 334			return NULL;
 335
 336		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 337		break;
 338
 339	case HCI_DEV_SETUP:
 340		if (hdev->manufacturer == 0xffff)
 341			return NULL;
 342
 343		/* fall through */
 344
 345	case HCI_DEV_UP:
 346		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 347		if (!skb)
 348			return NULL;
 349
 350		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 351		bacpy(&ii->bdaddr, &hdev->bdaddr);
 352		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 353
 354		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 355		break;
 356
 357	case HCI_DEV_OPEN:
 358		skb = bt_skb_alloc(0, GFP_ATOMIC);
 359		if (!skb)
 360			return NULL;
 361
 362		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 363		break;
 364
 365	case HCI_DEV_CLOSE:
 366		skb = bt_skb_alloc(0, GFP_ATOMIC);
 367		if (!skb)
 368			return NULL;
 369
 370		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 371		break;
 372
 373	default:
 374		return NULL;
 375	}
 376
 377	__net_timestamp(skb);
 378
 379	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 380	hdr->opcode = opcode;
 381	hdr->index = cpu_to_le16(hdev->id);
 382	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 383
 384	return skb;
 385}
 386
 387static void __printf(2, 3)
 388send_monitor_note(struct sock *sk, const char *fmt, ...)
 389{
 390	size_t len;
 391	struct hci_mon_hdr *hdr;
 392	struct sk_buff *skb;
 393	va_list args;
 394
 395	va_start(args, fmt);
 396	len = vsnprintf(NULL, 0, fmt, args);
 397	va_end(args);
 398
 399	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 400	if (!skb)
 401		return;
 402
 403	va_start(args, fmt);
 404	vsprintf(skb_put(skb, len), fmt, args);
 405	*skb_put(skb, 1) = 0;
 406	va_end(args);
 407
 408	__net_timestamp(skb);
 409
 410	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 411	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 412	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 413	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 414
 415	if (sock_queue_rcv_skb(sk, skb))
 416		kfree_skb(skb);
 417}
 418
 419static void send_monitor_replay(struct sock *sk)
 420{
 421	struct hci_dev *hdev;
 422
 423	read_lock(&hci_dev_list_lock);
 424
 425	list_for_each_entry(hdev, &hci_dev_list, list) {
 426		struct sk_buff *skb;
 427
 428		skb = create_monitor_event(hdev, HCI_DEV_REG);
 429		if (!skb)
 430			continue;
 431
 432		if (sock_queue_rcv_skb(sk, skb))
 433			kfree_skb(skb);
 434
 435		if (!test_bit(HCI_RUNNING, &hdev->flags))
 436			continue;
 437
 438		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 439		if (!skb)
 440			continue;
 441
 442		if (sock_queue_rcv_skb(sk, skb))
 443			kfree_skb(skb);
 444
 445		if (test_bit(HCI_UP, &hdev->flags))
 446			skb = create_monitor_event(hdev, HCI_DEV_UP);
 447		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 448			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 449		else
 450			skb = NULL;
 451
 452		if (skb) {
 453			if (sock_queue_rcv_skb(sk, skb))
 454				kfree_skb(skb);
 455		}
 456	}
 457
 458	read_unlock(&hci_dev_list_lock);
 459}
 460
 461/* Generate internal stack event */
 462static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 463{
 464	struct hci_event_hdr *hdr;
 465	struct hci_ev_stack_internal *ev;
 466	struct sk_buff *skb;
 467
 468	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 469	if (!skb)
 470		return;
 471
 472	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 473	hdr->evt  = HCI_EV_STACK_INTERNAL;
 474	hdr->plen = sizeof(*ev) + dlen;
 475
 476	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 477	ev->type = type;
 478	memcpy(ev->data, data, dlen);
 479
 480	bt_cb(skb)->incoming = 1;
 481	__net_timestamp(skb);
 482
 483	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 484	hci_send_to_sock(hdev, skb);
 485	kfree_skb(skb);
 486}
 487
 488void hci_sock_dev_event(struct hci_dev *hdev, int event)
 489{
 490	BT_DBG("hdev %s event %d", hdev->name, event);
 491
 492	if (atomic_read(&monitor_promisc)) {
 493		struct sk_buff *skb;
 494
 495		/* Send event to monitor */
 496		skb = create_monitor_event(hdev, event);
 497		if (skb) {
 498			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 499					    HCI_SOCK_TRUSTED, NULL);
 500			kfree_skb(skb);
 501		}
 502	}
 503
 504	if (event <= HCI_DEV_DOWN) {
 505		struct hci_ev_si_device ev;
 506
 507		/* Send event to sockets */
 508		ev.event  = event;
 509		ev.dev_id = hdev->id;
 510		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 511	}
 512
 513	if (event == HCI_DEV_UNREG) {
 514		struct sock *sk;
 515
 516		/* Detach sockets from device */
 517		read_lock(&hci_sk_list.lock);
 518		sk_for_each(sk, &hci_sk_list.head) {
 519			bh_lock_sock_nested(sk);
 520			if (hci_pi(sk)->hdev == hdev) {
 521				hci_pi(sk)->hdev = NULL;
 522				sk->sk_err = EPIPE;
 523				sk->sk_state = BT_OPEN;
 524				sk->sk_state_change(sk);
 525
 526				hci_dev_put(hdev);
 527			}
 528			bh_unlock_sock(sk);
 529		}
 530		read_unlock(&hci_sk_list.lock);
 531	}
 532}
 533
 534static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 535{
 536	struct hci_mgmt_chan *c;
 537
 538	list_for_each_entry(c, &mgmt_chan_list, list) {
 539		if (c->channel == channel)
 540			return c;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 547{
 548	struct hci_mgmt_chan *c;
 549
 550	mutex_lock(&mgmt_chan_list_lock);
 551	c = __hci_mgmt_chan_find(channel);
 552	mutex_unlock(&mgmt_chan_list_lock);
 553
 554	return c;
 555}
 556
 557int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 558{
 559	if (c->channel < HCI_CHANNEL_CONTROL)
 560		return -EINVAL;
 561
 562	mutex_lock(&mgmt_chan_list_lock);
 563	if (__hci_mgmt_chan_find(c->channel)) {
 564		mutex_unlock(&mgmt_chan_list_lock);
 565		return -EALREADY;
 566	}
 567
 568	list_add_tail(&c->list, &mgmt_chan_list);
 569
 570	mutex_unlock(&mgmt_chan_list_lock);
 571
 572	return 0;
 573}
 574EXPORT_SYMBOL(hci_mgmt_chan_register);
 575
 576void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 577{
 578	mutex_lock(&mgmt_chan_list_lock);
 579	list_del(&c->list);
 580	mutex_unlock(&mgmt_chan_list_lock);
 581}
 582EXPORT_SYMBOL(hci_mgmt_chan_unregister);
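These two exports are how the management core plugs a channel into the socket layer. A sketch of the registration pattern used by net/bluetooth/mgmt.c, with the handler table abbreviated:

	static struct hci_mgmt_chan chan = {
		.channel	= HCI_CHANNEL_CONTROL,
		.handler_count	= ARRAY_SIZE(mgmt_handlers),
		.handlers	= mgmt_handlers,	/* { func, data_len, flags } table */
		.hdev_init	= mgmt_init_hdev,
	};

	int mgmt_init(void)
	{
		return hci_mgmt_chan_register(&chan);
	}
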
 583
 584static int hci_sock_release(struct socket *sock)
 585{
 586	struct sock *sk = sock->sk;
 587	struct hci_dev *hdev;
 588
 589	BT_DBG("sock %p sk %p", sock, sk);
 590
 591	if (!sk)
 592		return 0;
 593
 594	hdev = hci_pi(sk)->hdev;
 595
 596	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
 597		atomic_dec(&monitor_promisc);
 598
 599	bt_sock_unlink(&hci_sk_list, sk);
 600
 601	if (hdev) {
 602		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
  603			/* When releasing a user channel's exclusive access,
 604			 * call hci_dev_do_close directly instead of calling
 605			 * hci_dev_close to ensure the exclusive access will
 606			 * be released and the controller brought back down.
 607			 *
 608			 * The checking of HCI_AUTO_OFF is not needed in this
 609			 * case since it will have been cleared already when
 610			 * opening the user channel.
 611			 */
 612			hci_dev_do_close(hdev);
 613			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 614			mgmt_index_added(hdev);
 615		}
 616
 617		atomic_dec(&hdev->promisc);
 618		hci_dev_put(hdev);
 619	}
 620
 621	sock_orphan(sk);
 622
 623	skb_queue_purge(&sk->sk_receive_queue);
 624	skb_queue_purge(&sk->sk_write_queue);
 625
 626	sock_put(sk);
 627	return 0;
 628}
 629
 630static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 631{
 632	bdaddr_t bdaddr;
 633	int err;
 634
 635	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 636		return -EFAULT;
 637
 638	hci_dev_lock(hdev);
 639
 640	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 641
 642	hci_dev_unlock(hdev);
 643
 644	return err;
 645}
 646
 647static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 648{
 649	bdaddr_t bdaddr;
 650	int err;
 651
 652	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 653		return -EFAULT;
 654
 655	hci_dev_lock(hdev);
 656
 657	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 658
 659	hci_dev_unlock(hdev);
 660
 661	return err;
 662}
 663
 664/* Ioctls that require bound socket */
 665static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 666				unsigned long arg)
 667{
 668	struct hci_dev *hdev = hci_pi(sk)->hdev;
 669
 670	if (!hdev)
 671		return -EBADFD;
 672
 673	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 674		return -EBUSY;
 675
 676	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 677		return -EOPNOTSUPP;
 678
 679	if (hdev->dev_type != HCI_BREDR)
 680		return -EOPNOTSUPP;
 681
 682	switch (cmd) {
 683	case HCISETRAW:
 684		if (!capable(CAP_NET_ADMIN))
 685			return -EPERM;
 686		return -EOPNOTSUPP;
 687
 688	case HCIGETCONNINFO:
 689		return hci_get_conn_info(hdev, (void __user *)arg);
 690
 691	case HCIGETAUTHINFO:
 692		return hci_get_auth_info(hdev, (void __user *)arg);
 693
 694	case HCIBLOCKADDR:
 695		if (!capable(CAP_NET_ADMIN))
 696			return -EPERM;
 697		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 698
 699	case HCIUNBLOCKADDR:
 700		if (!capable(CAP_NET_ADMIN))
 701			return -EPERM;
 702		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 703	}
 704
 705	return -ENOIOCTLCMD;
 706}
 707
 708static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 709			  unsigned long arg)
 710{
 711	void __user *argp = (void __user *)arg;
 712	struct sock *sk = sock->sk;
 713	int err;
 714
 715	BT_DBG("cmd %x arg %lx", cmd, arg);
 716
 717	lock_sock(sk);
 718
 719	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 720		err = -EBADFD;
 721		goto done;
 722	}
 723
 724	release_sock(sk);
 725
 726	switch (cmd) {
 727	case HCIGETDEVLIST:
 728		return hci_get_dev_list(argp);
 729
 730	case HCIGETDEVINFO:
 731		return hci_get_dev_info(argp);
 732
 733	case HCIGETCONNLIST:
 734		return hci_get_conn_list(argp);
 735
 736	case HCIDEVUP:
 737		if (!capable(CAP_NET_ADMIN))
 738			return -EPERM;
 739		return hci_dev_open(arg);
 740
 741	case HCIDEVDOWN:
 742		if (!capable(CAP_NET_ADMIN))
 743			return -EPERM;
 744		return hci_dev_close(arg);
 745
 746	case HCIDEVRESET:
 747		if (!capable(CAP_NET_ADMIN))
 748			return -EPERM;
 749		return hci_dev_reset(arg);
 750
 751	case HCIDEVRESTAT:
 752		if (!capable(CAP_NET_ADMIN))
 753			return -EPERM;
 754		return hci_dev_reset_stat(arg);
 755
 756	case HCISETSCAN:
 757	case HCISETAUTH:
 758	case HCISETENCRYPT:
 759	case HCISETPTYPE:
 760	case HCISETLINKPOL:
 761	case HCISETLINKMODE:
 762	case HCISETACLMTU:
 763	case HCISETSCOMTU:
 764		if (!capable(CAP_NET_ADMIN))
 765			return -EPERM;
 766		return hci_dev_cmd(cmd, argp);
 767
 768	case HCIINQUIRY:
 769		return hci_inquiry(argp);
 770	}
 771
 772	lock_sock(sk);
 773
 774	err = hci_sock_bound_ioctl(sk, cmd, arg);
 775
 776done:
 777	release_sock(sk);
 778	return err;
 779}
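A hedged userspace sketch of the unbound-ioctl path above, bringing hci0 up (CAP_NET_ADMIN is required; a freshly created socket defaults to the raw channel, so no bind() is needed first):

	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd >= 0 && ioctl(fd, HCIDEVUP, 0) < 0 && errno != EALREADY)
		perror("ioctl(HCIDEVUP)");
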
 780
 781static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 782			 int addr_len)
 783{
 784	struct sockaddr_hci haddr;
 785	struct sock *sk = sock->sk;
 786	struct hci_dev *hdev = NULL;
 787	int len, err = 0;
 788
 789	BT_DBG("sock %p sk %p", sock, sk);
 790
 791	if (!addr)
 792		return -EINVAL;
 793
 794	memset(&haddr, 0, sizeof(haddr));
 795	len = min_t(unsigned int, sizeof(haddr), addr_len);
 796	memcpy(&haddr, addr, len);
 797
 798	if (haddr.hci_family != AF_BLUETOOTH)
 799		return -EINVAL;
 800
 801	lock_sock(sk);
 802
 803	if (sk->sk_state == BT_BOUND) {
 804		err = -EALREADY;
 805		goto done;
 806	}
 807
 808	switch (haddr.hci_channel) {
 809	case HCI_CHANNEL_RAW:
 810		if (hci_pi(sk)->hdev) {
 811			err = -EALREADY;
 812			goto done;
 813		}
 814
 815		if (haddr.hci_dev != HCI_DEV_NONE) {
 816			hdev = hci_dev_get(haddr.hci_dev);
 817			if (!hdev) {
 818				err = -ENODEV;
 819				goto done;
 820			}
 821
 822			atomic_inc(&hdev->promisc);
 823		}
 824
 825		hci_pi(sk)->hdev = hdev;
 826		break;
 827
 828	case HCI_CHANNEL_USER:
 829		if (hci_pi(sk)->hdev) {
 830			err = -EALREADY;
 831			goto done;
 832		}
 833
 834		if (haddr.hci_dev == HCI_DEV_NONE) {
 835			err = -EINVAL;
 836			goto done;
 837		}
 838
 839		if (!capable(CAP_NET_ADMIN)) {
 840			err = -EPERM;
 841			goto done;
 842		}
 843
 844		hdev = hci_dev_get(haddr.hci_dev);
 845		if (!hdev) {
 846			err = -ENODEV;
 847			goto done;
 848		}
 849
 850		if (test_bit(HCI_INIT, &hdev->flags) ||
 851		    hci_dev_test_flag(hdev, HCI_SETUP) ||
 852		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 853		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
 854		     test_bit(HCI_UP, &hdev->flags))) {
 855			err = -EBUSY;
 856			hci_dev_put(hdev);
 857			goto done;
 858		}
 859
 860		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
 861			err = -EUSERS;
 862			hci_dev_put(hdev);
 863			goto done;
 864		}
 865
 866		mgmt_index_removed(hdev);
 867
 868		err = hci_dev_open(hdev->id);
 869		if (err) {
 870			if (err == -EALREADY) {
 871				/* In case the transport is already up and
 872				 * running, clear the error here.
 873				 *
  874				 * This can happen when opening a user
 875				 * channel and HCI_AUTO_OFF grace period
 876				 * is still active.
 877				 */
 878				err = 0;
 879			} else {
 880				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 881				mgmt_index_added(hdev);
 882				hci_dev_put(hdev);
 883				goto done;
 884			}
 885		}
 886
 887		atomic_inc(&hdev->promisc);
 888
 889		hci_pi(sk)->hdev = hdev;
 890		break;
 891
 892	case HCI_CHANNEL_MONITOR:
 893		if (haddr.hci_dev != HCI_DEV_NONE) {
 894			err = -EINVAL;
 895			goto done;
 896		}
 897
 898		if (!capable(CAP_NET_RAW)) {
 899			err = -EPERM;
 900			goto done;
 901		}
 902
  903		/* The monitor interface requires CAP_NET_RAW and is
  904		 * therefore implicitly trusted.
 905		 */
 906		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 907
 908		send_monitor_note(sk, "Linux version %s (%s)",
 909				  init_utsname()->release,
 910				  init_utsname()->machine);
 911		send_monitor_note(sk, "Bluetooth subsystem version %s",
 912				  BT_SUBSYS_VERSION);
 913		send_monitor_replay(sk);
 914
 915		atomic_inc(&monitor_promisc);
 916		break;
 917
 918	case HCI_CHANNEL_LOGGING:
 919		if (haddr.hci_dev != HCI_DEV_NONE) {
 920			err = -EINVAL;
 921			goto done;
 922		}
 923
 924		if (!capable(CAP_NET_ADMIN)) {
 925			err = -EPERM;
 926			goto done;
 927		}
 928		break;
 929
 930	default:
 931		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
 932			err = -EINVAL;
 933			goto done;
 934		}
 935
 936		if (haddr.hci_dev != HCI_DEV_NONE) {
 937			err = -EINVAL;
 938			goto done;
 939		}
 940
 941		/* Users with CAP_NET_ADMIN capabilities are allowed
 942		 * access to all management commands and events. For
  943		 * untrusted users the interface is restricted, and
  944		 * only events marked as untrusted are sent.
 945		 */
 946		if (capable(CAP_NET_ADMIN))
 947			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 948
 949		/* At the moment the index and unconfigured index events
 950		 * are enabled unconditionally. Setting them on each
 951		 * socket when binding keeps this functionality. They
 952		 * however might be cleared later and then sending of these
 953		 * events will be disabled, but that is then intentional.
 954		 *
 955		 * This also enables generic events that are safe to be
 956		 * received by untrusted users. Example for such events
 957		 * are changes to settings, class of device, name etc.
 958		 */
 959		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
 960			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
 961			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
 962			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
 963		}
 964		break;
 965	}
 966
 967
 968	hci_pi(sk)->channel = haddr.hci_channel;
 969	sk->sk_state = BT_BOUND;
 970
 971done:
 972	release_sock(sk);
 973	return err;
 974}
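A minimal userspace sketch of the user channel path above, taking exclusive control of hci0 (the device must not already be up, and the caller needs CAP_NET_ADMIN):

	struct sockaddr_hci addr = {
		.hci_family  = AF_BLUETOOTH,
		.hci_dev     = 0,			/* hci0 */
		.hci_channel = HCI_CHANNEL_USER,
	};
	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("user channel bind");	/* EBUSY if the device is up */
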
 975
 976static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 977			    int *addr_len, int peer)
 978{
 979	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
 980	struct sock *sk = sock->sk;
 981	struct hci_dev *hdev;
 982	int err = 0;
 983
 984	BT_DBG("sock %p sk %p", sock, sk);
 985
 986	if (peer)
 987		return -EOPNOTSUPP;
 988
 989	lock_sock(sk);
 990
 991	hdev = hci_pi(sk)->hdev;
 992	if (!hdev) {
 993		err = -EBADFD;
 994		goto done;
 995	}
 996
 997	*addr_len = sizeof(*haddr);
 998	haddr->hci_family = AF_BLUETOOTH;
 999	haddr->hci_dev    = hdev->id;
 1000	haddr->hci_channel = hci_pi(sk)->channel;
1001
1002done:
1003	release_sock(sk);
1004	return err;
1005}
1006
1007static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1008			  struct sk_buff *skb)
1009{
1010	__u32 mask = hci_pi(sk)->cmsg_mask;
1011
1012	if (mask & HCI_CMSG_DIR) {
1013		int incoming = bt_cb(skb)->incoming;
1014		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1015			 &incoming);
1016	}
1017
1018	if (mask & HCI_CMSG_TSTAMP) {
1019#ifdef CONFIG_COMPAT
1020		struct compat_timeval ctv;
1021#endif
1022		struct timeval tv;
1023		void *data;
1024		int len;
1025
1026		skb_get_timestamp(skb, &tv);
1027
1028		data = &tv;
1029		len = sizeof(tv);
1030#ifdef CONFIG_COMPAT
1031		if (!COMPAT_USE_64BIT_TIME &&
1032		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1033			ctv.tv_sec = tv.tv_sec;
1034			ctv.tv_usec = tv.tv_usec;
1035			data = &ctv;
1036			len = sizeof(ctv);
1037		}
1038#endif
1039
1040		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1041	}
1042}
1043
1044static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1045			    size_t len, int flags)
1046{
1047	int noblock = flags & MSG_DONTWAIT;
1048	struct sock *sk = sock->sk;
1049	struct sk_buff *skb;
1050	int copied, err;
1051
1052	BT_DBG("sock %p, sk %p", sock, sk);
1053
1054	if (flags & MSG_OOB)
1055		return -EOPNOTSUPP;
1056
1057	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1058		return -EOPNOTSUPP;
1059
1060	if (sk->sk_state == BT_CLOSED)
1061		return 0;
1062
1063	skb = skb_recv_datagram(sk, flags, noblock, &err);
1064	if (!skb)
1065		return err;
1066
1067	copied = skb->len;
1068	if (len < copied) {
1069		msg->msg_flags |= MSG_TRUNC;
1070		copied = len;
1071	}
1072
1073	skb_reset_transport_header(skb);
1074	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1075
1076	switch (hci_pi(sk)->channel) {
1077	case HCI_CHANNEL_RAW:
1078		hci_sock_cmsg(sk, msg, skb);
1079		break;
1080	case HCI_CHANNEL_USER:
1081	case HCI_CHANNEL_MONITOR:
1082		sock_recv_timestamp(msg, sk, skb);
1083		break;
1084	default:
1085		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1086			sock_recv_timestamp(msg, sk, skb);
1087		break;
1088	}
1089
1090	skb_free_datagram(sk, skb);
1091
1092	return err ? : copied;
1093}
1094
1095static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1096			struct msghdr *msg, size_t msglen)
1097{
1098	void *buf;
1099	u8 *cp;
1100	struct mgmt_hdr *hdr;
1101	u16 opcode, index, len;
1102	struct hci_dev *hdev = NULL;
1103	const struct hci_mgmt_handler *handler;
1104	bool var_len, no_hdev;
1105	int err;
1106
1107	BT_DBG("got %zu bytes", msglen);
1108
1109	if (msglen < sizeof(*hdr))
1110		return -EINVAL;
1111
1112	buf = kmalloc(msglen, GFP_KERNEL);
1113	if (!buf)
1114		return -ENOMEM;
1115
1116	if (memcpy_from_msg(buf, msg, msglen)) {
1117		err = -EFAULT;
1118		goto done;
1119	}
1120
1121	hdr = buf;
1122	opcode = __le16_to_cpu(hdr->opcode);
1123	index = __le16_to_cpu(hdr->index);
1124	len = __le16_to_cpu(hdr->len);
1125
1126	if (len != msglen - sizeof(*hdr)) {
1127		err = -EINVAL;
1128		goto done;
1129	}
1130
1131	if (opcode >= chan->handler_count ||
1132	    chan->handlers[opcode].func == NULL) {
1133		BT_DBG("Unknown op %u", opcode);
1134		err = mgmt_cmd_status(sk, index, opcode,
1135				      MGMT_STATUS_UNKNOWN_COMMAND);
1136		goto done;
1137	}
1138
1139	handler = &chan->handlers[opcode];
1140
1141	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1142	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1143		err = mgmt_cmd_status(sk, index, opcode,
1144				      MGMT_STATUS_PERMISSION_DENIED);
1145		goto done;
1146	}
1147
1148	if (index != MGMT_INDEX_NONE) {
1149		hdev = hci_dev_get(index);
1150		if (!hdev) {
1151			err = mgmt_cmd_status(sk, index, opcode,
1152					      MGMT_STATUS_INVALID_INDEX);
1153			goto done;
1154		}
1155
1156		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1157		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1158		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1159			err = mgmt_cmd_status(sk, index, opcode,
1160					      MGMT_STATUS_INVALID_INDEX);
1161			goto done;
1162		}
1163
1164		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1165		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1166			err = mgmt_cmd_status(sk, index, opcode,
1167					      MGMT_STATUS_INVALID_INDEX);
1168			goto done;
1169		}
1170	}
1171
1172	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1173	if (no_hdev != !hdev) {
1174		err = mgmt_cmd_status(sk, index, opcode,
1175				      MGMT_STATUS_INVALID_INDEX);
1176		goto done;
1177	}
1178
1179	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1180	if ((var_len && len < handler->data_len) ||
1181	    (!var_len && len != handler->data_len)) {
1182		err = mgmt_cmd_status(sk, index, opcode,
1183				      MGMT_STATUS_INVALID_PARAMS);
1184		goto done;
1185	}
1186
1187	if (hdev && chan->hdev_init)
1188		chan->hdev_init(sk, hdev);
1189
1190	cp = buf + sizeof(*hdr);
1191
1192	err = handler->func(sk, hdev, cp, len);
1193	if (err < 0)
1194		goto done;
1195
1196	err = msglen;
1197
1198done:
1199	if (hdev)
1200		hci_dev_put(hdev);
1201
1202	kfree(buf);
1203	return err;
1204}
1205
1206static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1207{
1208	struct hci_mon_hdr *hdr;
1209	struct sk_buff *skb;
1210	struct hci_dev *hdev;
1211	u16 index;
1212	int err;
1213
1214	/* The logging frame consists at minimum of the standard header,
1215	 * the priority byte, the ident length byte and at least one string
 1216	 * terminator NUL byte. Anything shorter is an invalid packet.
1217	 */
1218	if (len < sizeof(*hdr) + 3)
1219		return -EINVAL;
1220
1221	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1222	if (!skb)
1223		return err;
1224
1225	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1226		err = -EFAULT;
1227		goto drop;
1228	}
1229
1230	hdr = (void *)skb->data;
1231
1232	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1233		err = -EINVAL;
1234		goto drop;
1235	}
1236
1237	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1238		__u8 priority = skb->data[sizeof(*hdr)];
1239		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1240
1241		/* Only the priorities 0-7 are valid and with that any other
1242		 * value results in an invalid packet.
1243		 *
1244		 * The priority byte is followed by an ident length byte and
1245		 * the NUL terminated ident string. Check that the ident
1246		 * length is not overflowing the packet and also that the
1247		 * ident string itself is NUL terminated. In case the ident
 1248		 * length is zero, the zero length byte itself doubles as the
 1249		 * ident's NUL terminator.
1250		 *
1251		 * The message follows the ident string (if present) and
1252		 * must be NUL terminated. Otherwise it is not a valid packet.
1253		 */
1254		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1255		    ident_len > len - sizeof(*hdr) - 3 ||
1256		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1257			err = -EINVAL;
1258			goto drop;
1259		}
1260	} else {
1261		err = -EINVAL;
1262		goto drop;
1263	}
1264
1265	index = __le16_to_cpu(hdr->index);
1266
1267	if (index != MGMT_INDEX_NONE) {
1268		hdev = hci_dev_get(index);
1269		if (!hdev) {
1270			err = -ENODEV;
1271			goto drop;
1272		}
1273	} else {
1274		hdev = NULL;
1275	}
1276
1277	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1278
1279	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1280	err = len;
1281
1282	if (hdev)
1283		hci_dev_put(hdev);
1284
1285drop:
1286	kfree_skb(skb);
1287	return err;
1288}
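Putting the validation rules above together, a well-formed logging frame looks like the following sketch. Note that ident_len counts the ident's trailing NUL, which is what the data[sizeof(*hdr) + ident_len + 1] check relies on; the ident "myapp" and message "hello" are purely illustrative:

	static const unsigned char frame[] = {
		0x00, 0x00,	/* opcode 0x0000, rewritten to HCI_MON_USER_LOGGING */
		0xff, 0xff,	/* index: MGMT_INDEX_NONE */
		0x0e, 0x00,	/* payload length: 14 */
		0x06,		/* priority 0-7 (here: info) */
		0x06,		/* ident length, including its NUL */
		'm', 'y', 'a', 'p', 'p', '\0',
		'h', 'e', 'l', 'l', 'o', '\0',
	};
	/* send(fd, frame, sizeof(frame), 0) on a socket bound to
	 * HCI_CHANNEL_LOGGING
	 */
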
1289
1290static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1291			    size_t len)
1292{
1293	struct sock *sk = sock->sk;
1294	struct hci_mgmt_chan *chan;
1295	struct hci_dev *hdev;
1296	struct sk_buff *skb;
1297	int err;
1298
1299	BT_DBG("sock %p sk %p", sock, sk);
1300
1301	if (msg->msg_flags & MSG_OOB)
1302		return -EOPNOTSUPP;
1303
1304	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1305		return -EINVAL;
1306
1307	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1308		return -EINVAL;
1309
1310	lock_sock(sk);
1311
1312	switch (hci_pi(sk)->channel) {
1313	case HCI_CHANNEL_RAW:
1314	case HCI_CHANNEL_USER:
1315		break;
1316	case HCI_CHANNEL_MONITOR:
1317		err = -EOPNOTSUPP;
1318		goto done;
1319	case HCI_CHANNEL_LOGGING:
1320		err = hci_logging_frame(sk, msg, len);
1321		goto done;
1322	default:
1323		mutex_lock(&mgmt_chan_list_lock);
1324		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1325		if (chan)
1326			err = hci_mgmt_cmd(chan, sk, msg, len);
1327		else
1328			err = -EINVAL;
1329
1330		mutex_unlock(&mgmt_chan_list_lock);
1331		goto done;
1332	}
1333
1334	hdev = hci_pi(sk)->hdev;
1335	if (!hdev) {
1336		err = -EBADFD;
1337		goto done;
1338	}
1339
1340	if (!test_bit(HCI_UP, &hdev->flags)) {
1341		err = -ENETDOWN;
1342		goto done;
1343	}
1344
1345	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1346	if (!skb)
1347		goto done;
1348
1349	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1350		err = -EFAULT;
1351		goto drop;
1352	}
1353
1354	hci_skb_pkt_type(skb) = skb->data[0];
1355	skb_pull(skb, 1);
1356
1357	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1358		/* No permission check is needed for user channel
1359		 * since that gets enforced when binding the socket.
1360		 *
1361		 * However check that the packet type is valid.
1362		 */
1363		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1364		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1365		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1366			err = -EINVAL;
1367			goto drop;
1368		}
1369
1370		skb_queue_tail(&hdev->raw_q, skb);
1371		queue_work(hdev->workqueue, &hdev->tx_work);
1372	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1373		u16 opcode = get_unaligned_le16(skb->data);
1374		u16 ogf = hci_opcode_ogf(opcode);
1375		u16 ocf = hci_opcode_ocf(opcode);
1376
1377		if (((ogf > HCI_SFLT_MAX_OGF) ||
1378		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1379				   &hci_sec_filter.ocf_mask[ogf])) &&
1380		    !capable(CAP_NET_RAW)) {
1381			err = -EPERM;
1382			goto drop;
1383		}
1384
1385		/* Since the opcode has already been extracted here, store
1386		 * a copy of the value for later use by the drivers.
1387		 */
1388		hci_skb_opcode(skb) = opcode;
1389
1390		if (ogf == 0x3f) {
1391			skb_queue_tail(&hdev->raw_q, skb);
1392			queue_work(hdev->workqueue, &hdev->tx_work);
1393		} else {
1394			/* Stand-alone HCI commands must be flagged as
1395			 * single-command requests.
1396			 */
1397			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1398
1399			skb_queue_tail(&hdev->cmd_q, skb);
1400			queue_work(hdev->workqueue, &hdev->cmd_work);
1401		}
1402	} else {
1403		if (!capable(CAP_NET_RAW)) {
1404			err = -EPERM;
1405			goto drop;
1406		}
1407
1408		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1409		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1410			err = -EINVAL;
1411			goto drop;
1412		}
1413
1414		skb_queue_tail(&hdev->raw_q, skb);
1415		queue_work(hdev->workqueue, &hdev->tx_work);
1416	}
1417
1418	err = len;
1419
1420done:
1421	release_sock(sk);
1422	return err;
1423
1424drop:
1425	kfree_skb(skb);
1426	goto done;
1427}
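A userspace sketch of the command path above: HCI Read Local Version Information (OGF 0x04, OCF 0x0001) is permitted by the security filter's OGF_INFO_PARAM row, so no capability is needed on a raw socket. The leading byte is the packet type that skb_pull() strips off:

	static const unsigned char cmd[] = {
		HCI_COMMAND_PKT,	/* 0x01 */
		0x01, 0x10,		/* opcode 0x1001, little endian */
		0x00,			/* parameter length */
	};

	if (send(fd, cmd, sizeof(cmd), 0) < 0)
		perror("send");
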
1428
1429static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1430			       char __user *optval, unsigned int len)
1431{
1432	struct hci_ufilter uf = { .opcode = 0 };
1433	struct sock *sk = sock->sk;
1434	int err = 0, opt = 0;
1435
1436	BT_DBG("sk %p, opt %d", sk, optname);
1437
1438	lock_sock(sk);
1439
1440	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1441		err = -EBADFD;
1442		goto done;
1443	}
1444
1445	switch (optname) {
1446	case HCI_DATA_DIR:
1447		if (get_user(opt, (int __user *)optval)) {
1448			err = -EFAULT;
1449			break;
1450		}
1451
1452		if (opt)
1453			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1454		else
1455			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1456		break;
1457
1458	case HCI_TIME_STAMP:
1459		if (get_user(opt, (int __user *)optval)) {
1460			err = -EFAULT;
1461			break;
1462		}
1463
1464		if (opt)
1465			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1466		else
1467			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1468		break;
1469
1470	case HCI_FILTER:
1471		{
1472			struct hci_filter *f = &hci_pi(sk)->filter;
1473
1474			uf.type_mask = f->type_mask;
1475			uf.opcode    = f->opcode;
1476			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1477			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1478		}
1479
1480		len = min_t(unsigned int, len, sizeof(uf));
1481		if (copy_from_user(&uf, optval, len)) {
1482			err = -EFAULT;
1483			break;
1484		}
1485
1486		if (!capable(CAP_NET_RAW)) {
1487			uf.type_mask &= hci_sec_filter.type_mask;
1488			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1489			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1490		}
1491
1492		{
1493			struct hci_filter *f = &hci_pi(sk)->filter;
1494
1495			f->type_mask = uf.type_mask;
1496			f->opcode    = uf.opcode;
1497			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1498			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1499		}
1500		break;
1501
1502	default:
1503		err = -ENOPROTOOPT;
1504		break;
1505	}
1506
1507done:
1508	release_sock(sk);
1509	return err;
1510}
1511
1512static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1513			       char __user *optval, int __user *optlen)
1514{
1515	struct hci_ufilter uf;
1516	struct sock *sk = sock->sk;
1517	int len, opt, err = 0;
1518
1519	BT_DBG("sk %p, opt %d", sk, optname);
1520
1521	if (get_user(len, optlen))
1522		return -EFAULT;
1523
1524	lock_sock(sk);
1525
1526	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1527		err = -EBADFD;
1528		goto done;
1529	}
1530
1531	switch (optname) {
1532	case HCI_DATA_DIR:
1533		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1534			opt = 1;
1535		else
1536			opt = 0;
1537
1538		if (put_user(opt, optval))
1539			err = -EFAULT;
1540		break;
1541
1542	case HCI_TIME_STAMP:
1543		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1544			opt = 1;
1545		else
1546			opt = 0;
1547
1548		if (put_user(opt, optval))
1549			err = -EFAULT;
1550		break;
1551
1552	case HCI_FILTER:
1553		{
1554			struct hci_filter *f = &hci_pi(sk)->filter;
1555
1556			memset(&uf, 0, sizeof(uf));
1557			uf.type_mask = f->type_mask;
1558			uf.opcode    = f->opcode;
1559			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1560			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1561		}
1562
1563		len = min_t(unsigned int, len, sizeof(uf));
1564		if (copy_to_user(optval, &uf, len))
1565			err = -EFAULT;
1566		break;
1567
1568	default:
1569		err = -ENOPROTOOPT;
1570		break;
1571	}
1572
1573done:
1574	release_sock(sk);
1575	return err;
1576}
1577
1578static const struct proto_ops hci_sock_ops = {
1579	.family		= PF_BLUETOOTH,
1580	.owner		= THIS_MODULE,
1581	.release	= hci_sock_release,
1582	.bind		= hci_sock_bind,
1583	.getname	= hci_sock_getname,
1584	.sendmsg	= hci_sock_sendmsg,
1585	.recvmsg	= hci_sock_recvmsg,
1586	.ioctl		= hci_sock_ioctl,
1587	.poll		= datagram_poll,
1588	.listen		= sock_no_listen,
1589	.shutdown	= sock_no_shutdown,
1590	.setsockopt	= hci_sock_setsockopt,
1591	.getsockopt	= hci_sock_getsockopt,
1592	.connect	= sock_no_connect,
1593	.socketpair	= sock_no_socketpair,
1594	.accept		= sock_no_accept,
1595	.mmap		= sock_no_mmap
1596};
1597
1598static struct proto hci_sk_proto = {
1599	.name		= "HCI",
1600	.owner		= THIS_MODULE,
1601	.obj_size	= sizeof(struct hci_pinfo)
1602};
1603
1604static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1605			   int kern)
1606{
1607	struct sock *sk;
1608
1609	BT_DBG("sock %p", sock);
1610
1611	if (sock->type != SOCK_RAW)
1612		return -ESOCKTNOSUPPORT;
1613
1614	sock->ops = &hci_sock_ops;
1615
1616	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1617	if (!sk)
1618		return -ENOMEM;
1619
1620	sock_init_data(sock, sk);
1621
1622	sock_reset_flag(sk, SOCK_ZAPPED);
1623
1624	sk->sk_protocol = protocol;
1625
1626	sock->state = SS_UNCONNECTED;
1627	sk->sk_state = BT_OPEN;
1628
1629	bt_sock_link(&hci_sk_list, sk);
1630	return 0;
1631}
1632
1633static const struct net_proto_family hci_sock_family_ops = {
1634	.family	= PF_BLUETOOTH,
1635	.owner	= THIS_MODULE,
1636	.create	= hci_sock_create,
1637};
1638
1639int __init hci_sock_init(void)
1640{
1641	int err;
1642
1643	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1644
1645	err = proto_register(&hci_sk_proto, 0);
1646	if (err < 0)
1647		return err;
1648
1649	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1650	if (err < 0) {
1651		BT_ERR("HCI socket registration failed");
1652		goto error;
1653	}
1654
1655	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1656	if (err < 0) {
1657		BT_ERR("Failed to create HCI proc file");
1658		bt_sock_unregister(BTPROTO_HCI);
1659		goto error;
1660	}
1661
1662	BT_INFO("HCI socket layer initialized");
1663
1664	return 0;
1665
1666error:
1667	proto_unregister(&hci_sk_proto);
1668	return err;
1669}
1670
1671void hci_sock_cleanup(void)
1672{
1673	bt_procfs_cleanup(&init_net, "hci");
1674	bt_sock_unregister(BTPROTO_HCI);
1675	proto_unregister(&hci_sk_proto);
1676}
v4.17
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u32             cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60};
  61
  62void hci_sock_set_flag(struct sock *sk, int nr)
  63{
  64	set_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67void hci_sock_clear_flag(struct sock *sk, int nr)
  68{
  69	clear_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72int hci_sock_test_flag(struct sock *sk, int nr)
  73{
  74	return test_bit(nr, &hci_pi(sk)->flags);
  75}
  76
  77unsigned short hci_sock_get_channel(struct sock *sk)
  78{
  79	return hci_pi(sk)->channel;
  80}
  81
  82u32 hci_sock_get_cookie(struct sock *sk)
  83{
  84	return hci_pi(sk)->cookie;
  85}
  86
  87static bool hci_sock_gen_cookie(struct sock *sk)
  88{
  89	int id = hci_pi(sk)->cookie;
  90
  91	if (!id) {
  92		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
  93		if (id < 0)
  94			id = 0xffffffff;
  95
  96		hci_pi(sk)->cookie = id;
  97		get_task_comm(hci_pi(sk)->comm, current);
  98		return true;
  99	}
 100
 101	return false;
 102}
 103
 104static void hci_sock_free_cookie(struct sock *sk)
 105{
 106	int id = hci_pi(sk)->cookie;
 107
 108	if (id) {
 109		hci_pi(sk)->cookie = 0xffffffff;
 110		ida_simple_remove(&sock_cookie_ida, id);
 111	}
 112}
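The cookie gives each socket a stable, non-zero identity so the monitor's control-channel tracing added in this version can attribute commands and events to a socket. A minimal sketch of the IDA pattern used above, with a hypothetical example_ida (end = 0 means no upper bound):

	static DEFINE_IDA(example_ida);

	int id = ida_simple_get(&example_ida, 1, 0, GFP_KERNEL);
	if (id >= 0)
		ida_simple_remove(&example_ida, id);
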
 113
 114static inline int hci_test_bit(int nr, const void *addr)
 115{
 116	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 117}
 118
 119/* Security filter */
 120#define HCI_SFLT_MAX_OGF  5
 121
 122struct hci_sec_filter {
 123	__u32 type_mask;
 124	__u32 event_mask[2];
 125	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 126};
 127
 128static const struct hci_sec_filter hci_sec_filter = {
 129	/* Packet types */
 130	0x10,
 131	/* Events */
 132	{ 0x1000d9fe, 0x0000b00c },
 133	/* Commands */
 134	{
 135		{ 0x0 },
 136		/* OGF_LINK_CTL */
 137		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 138		/* OGF_LINK_POLICY */
 139		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 140		/* OGF_HOST_CTL */
 141		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 142		/* OGF_INFO_PARAM */
 143		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 144		/* OGF_STATUS_PARAM */
 145		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 146	}
 147};
 148
 149static struct bt_sock_list hci_sk_list = {
 150	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 151};
 152
 153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 154{
 155	struct hci_filter *flt;
 156	int flt_type, flt_event;
 157
 158	/* Apply filter */
 159	flt = &hci_pi(sk)->filter;
 160
 161	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 162
 163	if (!test_bit(flt_type, &flt->type_mask))
 164		return true;
 165
 166	/* Extra filter for event packets only */
 167	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 168		return false;
 169
 170	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 171
 172	if (!hci_test_bit(flt_event, &flt->event_mask))
 173		return true;
 174
 175	/* Check filter only when opcode is set */
 176	if (!flt->opcode)
 177		return false;
 178
 179	if (flt_event == HCI_EV_CMD_COMPLETE &&
 180	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 181		return true;
 182
 183	if (flt_event == HCI_EV_CMD_STATUS &&
 184	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 185		return true;
 186
 187	return false;
 188}
 189
 190/* Send frame to RAW socket */
 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 192{
 193	struct sock *sk;
 194	struct sk_buff *skb_copy = NULL;
 195
 196	BT_DBG("hdev %p len %d", hdev, skb->len);
 197
 198	read_lock(&hci_sk_list.lock);
 199
 200	sk_for_each(sk, &hci_sk_list.head) {
 201		struct sk_buff *nskb;
 202
 203		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 204			continue;
 205
 206		/* Don't send frame to the socket it came from */
 207		if (skb->sk == sk)
 208			continue;
 209
 210		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 211			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 212			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 213			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 214			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 215				continue;
 216			if (is_filtered_packet(sk, skb))
 217				continue;
 218		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 219			if (!bt_cb(skb)->incoming)
 220				continue;
 221			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 222			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 224				continue;
 225		} else {
 226			/* Don't send frame to other channel types */
 227			continue;
 228		}
 229
 230		if (!skb_copy) {
 231			/* Create a private copy with headroom */
 232			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 233			if (!skb_copy)
 234				continue;
 235
 236			/* Put type byte before the data */
 237			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 238		}
 239
 240		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 241		if (!nskb)
 242			continue;
 243
 244		if (sock_queue_rcv_skb(sk, nskb))
 245			kfree_skb(nskb);
 246	}
 247
 248	read_unlock(&hci_sk_list.lock);
 249
 250	kfree_skb(skb_copy);
 251}
 252
 253/* Send frame to sockets with specific channel */
 254static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 255				  int flag, struct sock *skip_sk)
 256{
 257	struct sock *sk;
 258
 259	BT_DBG("channel %u len %d", channel, skb->len);
 260
 261	sk_for_each(sk, &hci_sk_list.head) {
 262		struct sk_buff *nskb;
 263
 264		/* Ignore socket without the flag set */
 265		if (!hci_sock_test_flag(sk, flag))
 266			continue;
 267
 268		/* Skip the original socket */
 269		if (sk == skip_sk)
 270			continue;
 271
 272		if (sk->sk_state != BT_BOUND)
 273			continue;
 274
 275		if (hci_pi(sk)->channel != channel)
 276			continue;
 277
 278		nskb = skb_clone(skb, GFP_ATOMIC);
 279		if (!nskb)
 280			continue;
 281
 282		if (sock_queue_rcv_skb(sk, nskb))
 283			kfree_skb(nskb);
 284	}
 285
 286}
 287
 288void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 289			 int flag, struct sock *skip_sk)
 290{
 291	read_lock(&hci_sk_list.lock);
 292	__hci_send_to_channel(channel, skb, flag, skip_sk);
 293	read_unlock(&hci_sk_list.lock);
 294}
 295
 296/* Send frame to monitor socket */
 297void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 298{
 299	struct sk_buff *skb_copy = NULL;
 300	struct hci_mon_hdr *hdr;
 301	__le16 opcode;
 302
 303	if (!atomic_read(&monitor_promisc))
 304		return;
 305
 306	BT_DBG("hdev %p len %d", hdev, skb->len);
 307
 308	switch (hci_skb_pkt_type(skb)) {
 309	case HCI_COMMAND_PKT:
 310		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 311		break;
 312	case HCI_EVENT_PKT:
 313		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 314		break;
 315	case HCI_ACLDATA_PKT:
 316		if (bt_cb(skb)->incoming)
 317			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 318		else
 319			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 320		break;
 321	case HCI_SCODATA_PKT:
 322		if (bt_cb(skb)->incoming)
 323			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 324		else
 325			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 326		break;
 327	case HCI_DIAG_PKT:
 328		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 329		break;
 330	default:
 331		return;
 332	}
 333
 334	/* Create a private copy with headroom */
 335	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 336	if (!skb_copy)
 337		return;
 338
 339	/* Put header before the data */
 340	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 341	hdr->opcode = opcode;
 342	hdr->index = cpu_to_le16(hdev->id);
 343	hdr->len = cpu_to_le16(skb->len);
 344
 345	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 346			    HCI_SOCK_TRUSTED, NULL);
 347	kfree_skb(skb_copy);
 348}
 349
 350void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 351				 void *data, u16 data_len, ktime_t tstamp,
 352				 int flag, struct sock *skip_sk)
 353{
 354	struct sock *sk;
 355	__le16 index;
 356
 357	if (hdev)
 358		index = cpu_to_le16(hdev->id);
 359	else
 360		index = cpu_to_le16(MGMT_INDEX_NONE);
 361
 362	read_lock(&hci_sk_list.lock);
 363
 364	sk_for_each(sk, &hci_sk_list.head) {
 365		struct hci_mon_hdr *hdr;
 366		struct sk_buff *skb;
 367
 368		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 369			continue;
 370
 371		/* Ignore socket without the flag set */
 372		if (!hci_sock_test_flag(sk, flag))
 373			continue;
 374
 375		/* Skip the original socket */
 376		if (sk == skip_sk)
 377			continue;
 378
 379		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 380		if (!skb)
 381			continue;
 382
 383		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 384		put_unaligned_le16(event, skb_put(skb, 2));
 385
 386		if (data)
 387			skb_put_data(skb, data, data_len);
 388
 389		skb->tstamp = tstamp;
 390
 391		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 392		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 393		hdr->index = index;
 394		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 395
 396		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 397				      HCI_SOCK_TRUSTED, NULL);
 398		kfree_skb(skb);
 399	}
 400
 401	read_unlock(&hci_sk_list.lock);
 402}
 403
 404static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 405{
 406	struct hci_mon_hdr *hdr;
 407	struct hci_mon_new_index *ni;
 408	struct hci_mon_index_info *ii;
 409	struct sk_buff *skb;
 410	__le16 opcode;
 411
 412	switch (event) {
 413	case HCI_DEV_REG:
 414		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 415		if (!skb)
 416			return NULL;
 417
 418		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 419		ni->type = hdev->dev_type;
 420		ni->bus = hdev->bus;
 421		bacpy(&ni->bdaddr, &hdev->bdaddr);
 422		memcpy(ni->name, hdev->name, 8);
 423
 424		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 425		break;
 426
 427	case HCI_DEV_UNREG:
 428		skb = bt_skb_alloc(0, GFP_ATOMIC);
 429		if (!skb)
 430			return NULL;
 431
 432		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 433		break;
 434
 435	case HCI_DEV_SETUP:
 436		if (hdev->manufacturer == 0xffff)
 437			return NULL;
 438
 439		/* fall through */
 440
 441	case HCI_DEV_UP:
 442		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 443		if (!skb)
 444			return NULL;
 445
 446		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 447		bacpy(&ii->bdaddr, &hdev->bdaddr);
 448		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 449
 450		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 451		break;
 452
 453	case HCI_DEV_OPEN:
 454		skb = bt_skb_alloc(0, GFP_ATOMIC);
 455		if (!skb)
 456			return NULL;
 457
 458		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 459		break;
 460
 461	case HCI_DEV_CLOSE:
 462		skb = bt_skb_alloc(0, GFP_ATOMIC);
 463		if (!skb)
 464			return NULL;
 465
 466		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 467		break;
 468
 469	default:
 470		return NULL;
 471	}
 472
 473	__net_timestamp(skb);
 474
 475	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 476	hdr->opcode = opcode;
 477	hdr->index = cpu_to_le16(hdev->id);
 478	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 479
 480	return skb;
 481}
 482
 483static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 484{
 485	struct hci_mon_hdr *hdr;
 486	struct sk_buff *skb;
 487	u16 format;
 488	u8 ver[3];
 489	u32 flags;
 490
 491	/* No message needed when cookie is not present */
 492	if (!hci_pi(sk)->cookie)
 493		return NULL;
 494
 495	switch (hci_pi(sk)->channel) {
 496	case HCI_CHANNEL_RAW:
 497		format = 0x0000;
 498		ver[0] = BT_SUBSYS_VERSION;
 499		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 500		break;
 501	case HCI_CHANNEL_USER:
 502		format = 0x0001;
 503		ver[0] = BT_SUBSYS_VERSION;
 504		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 505		break;
 506	case HCI_CHANNEL_CONTROL:
 507		format = 0x0002;
 508		mgmt_fill_version_info(ver);
 509		break;
 510	default:
 511		/* No message for unsupported format */
 512		return NULL;
 513	}
 514
 515	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 516	if (!skb)
 517		return NULL;
 518
 519	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 520
 521	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 522	put_unaligned_le16(format, skb_put(skb, 2));
 523	skb_put_data(skb, ver, sizeof(ver));
 524	put_unaligned_le32(flags, skb_put(skb, 4));
 525	skb_put_u8(skb, TASK_COMM_LEN);
 526	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
 527
 528	__net_timestamp(skb);
 529
 530	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 531	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 532	if (hci_pi(sk)->hdev)
 533		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 534	else
 535		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 536	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 537
 538	return skb;
 539}
 540
 541static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 542{
 543	struct hci_mon_hdr *hdr;
 544	struct sk_buff *skb;
 545
 546	/* No message needed when cookie is not present */
 547	if (!hci_pi(sk)->cookie)
 548		return NULL;
 549
 550	switch (hci_pi(sk)->channel) {
 551	case HCI_CHANNEL_RAW:
 552	case HCI_CHANNEL_USER:
 553	case HCI_CHANNEL_CONTROL:
 554		break;
 555	default:
 556		/* No message for unsupported format */
 557		return NULL;
 558	}
 559
 560	skb = bt_skb_alloc(4, GFP_ATOMIC);
 561	if (!skb)
 562		return NULL;
 563
 564	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 565
 566	__net_timestamp(skb);
 567
 568	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 569	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 570	if (hci_pi(sk)->hdev)
 571		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 572	else
 573		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 574	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 575
 576	return skb;
 577}
 578
 579static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 580						   u16 opcode, u16 len,
 581						   const void *buf)
 582{
 583	struct hci_mon_hdr *hdr;
 584	struct sk_buff *skb;
 585
 586	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 587	if (!skb)
 588		return NULL;
 589
 590	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 591	put_unaligned_le16(opcode, skb_put(skb, 2));
 592
 593	if (buf)
 594		skb_put_data(skb, buf, len);
 595
 596	__net_timestamp(skb);
 597
 598	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 599	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 600	hdr->index = cpu_to_le16(index);
 601	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 602
 603	return skb;
 604}
 605
 606static void __printf(2, 3)
 607send_monitor_note(struct sock *sk, const char *fmt, ...)
 608{
 609	size_t len;
 610	struct hci_mon_hdr *hdr;
 611	struct sk_buff *skb;
 612	va_list args;
 613
 614	va_start(args, fmt);
 615	len = vsnprintf(NULL, 0, fmt, args);
 616	va_end(args);
 617
 618	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 619	if (!skb)
 620		return;
 621
 622	va_start(args, fmt);
 623	vsprintf(skb_put(skb, len), fmt, args);
 624	skb_put_u8(skb, 0);
 625	va_end(args);
 626
 627	__net_timestamp(skb);
 628
 629	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 630	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 631	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 632	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 633
 634	if (sock_queue_rcv_skb(sk, skb))
 635		kfree_skb(skb);
 636}
 637
 638static void send_monitor_replay(struct sock *sk)
 639{
 640	struct hci_dev *hdev;
 641
 642	read_lock(&hci_dev_list_lock);
 643
 644	list_for_each_entry(hdev, &hci_dev_list, list) {
 645		struct sk_buff *skb;
 646
 647		skb = create_monitor_event(hdev, HCI_DEV_REG);
 648		if (!skb)
 649			continue;
 650
 651		if (sock_queue_rcv_skb(sk, skb))
 652			kfree_skb(skb);
 653
 654		if (!test_bit(HCI_RUNNING, &hdev->flags))
 655			continue;
 656
 657		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 658		if (!skb)
 659			continue;
 660
 661		if (sock_queue_rcv_skb(sk, skb))
 662			kfree_skb(skb);
 663
 664		if (test_bit(HCI_UP, &hdev->flags))
 665			skb = create_monitor_event(hdev, HCI_DEV_UP);
 666		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 667			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 668		else
 669			skb = NULL;
 670
 671		if (skb) {
 672			if (sock_queue_rcv_skb(sk, skb))
 673				kfree_skb(skb);
 674		}
 675	}
 676
 677	read_unlock(&hci_dev_list_lock);
 678}
 679
 680static void send_monitor_control_replay(struct sock *mon_sk)
 681{
 682	struct sock *sk;
 683
 684	read_lock(&hci_sk_list.lock);
 685
 686	sk_for_each(sk, &hci_sk_list.head) {
 687		struct sk_buff *skb;
 688
 689		skb = create_monitor_ctrl_open(sk);
 690		if (!skb)
 691			continue;
 692
 693		if (sock_queue_rcv_skb(mon_sk, skb))
 694			kfree_skb(skb);
 695	}
 696
 697	read_unlock(&hci_sk_list.lock);
 698}
 699
 700/* Generate internal stack event */
 701static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 702{
 703	struct hci_event_hdr *hdr;
 704	struct hci_ev_stack_internal *ev;
 705	struct sk_buff *skb;
 706
 707	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 708	if (!skb)
 709		return;
 710
 711	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 712	hdr->evt  = HCI_EV_STACK_INTERNAL;
 713	hdr->plen = sizeof(*ev) + dlen;
 714
 715	ev = skb_put(skb, sizeof(*ev) + dlen);
 716	ev->type = type;
 717	memcpy(ev->data, data, dlen);
 718
 719	bt_cb(skb)->incoming = 1;
 720	__net_timestamp(skb);
 721
 722	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 723	hci_send_to_sock(hdev, skb);
 724	kfree_skb(skb);
 725}
 726
 727void hci_sock_dev_event(struct hci_dev *hdev, int event)
 728{
 729	BT_DBG("hdev %s event %d", hdev->name, event);
 730
 731	if (atomic_read(&monitor_promisc)) {
 732		struct sk_buff *skb;
 733
 734		/* Send event to monitor */
 735		skb = create_monitor_event(hdev, event);
 736		if (skb) {
 737			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 738					    HCI_SOCK_TRUSTED, NULL);
 739			kfree_skb(skb);
 740		}
 741	}
 742
 743	if (event <= HCI_DEV_DOWN) {
 744		struct hci_ev_si_device ev;
 745
 746		/* Send event to sockets */
 747		ev.event  = event;
 748		ev.dev_id = hdev->id;
 749		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 750	}
 751
 752	if (event == HCI_DEV_UNREG) {
 753		struct sock *sk;
 754
 755		/* Detach sockets from device */
 756		read_lock(&hci_sk_list.lock);
 757		sk_for_each(sk, &hci_sk_list.head) {
 758			bh_lock_sock_nested(sk);
 759			if (hci_pi(sk)->hdev == hdev) {
 760				hci_pi(sk)->hdev = NULL;
 761				sk->sk_err = EPIPE;
 762				sk->sk_state = BT_OPEN;
 763				sk->sk_state_change(sk);
 764
 765				hci_dev_put(hdev);
 766			}
 767			bh_unlock_sock(sk);
 768		}
 769		read_unlock(&hci_sk_list.lock);
 770	}
 771}
 772
 773static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 774{
 775	struct hci_mgmt_chan *c;
 776
 777	list_for_each_entry(c, &mgmt_chan_list, list) {
 778		if (c->channel == channel)
 779			return c;
 780	}
 781
 782	return NULL;
 783}
 784
 785static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 786{
 787	struct hci_mgmt_chan *c;
 788
 789	mutex_lock(&mgmt_chan_list_lock);
 790	c = __hci_mgmt_chan_find(channel);
 791	mutex_unlock(&mgmt_chan_list_lock);
 792
 793	return c;
 794}
 795
 796int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 797{
 798	if (c->channel < HCI_CHANNEL_CONTROL)
 799		return -EINVAL;
 800
 801	mutex_lock(&mgmt_chan_list_lock);
 802	if (__hci_mgmt_chan_find(c->channel)) {
 803		mutex_unlock(&mgmt_chan_list_lock);
 804		return -EALREADY;
 805	}
 806
 807	list_add_tail(&c->list, &mgmt_chan_list);
 808
 809	mutex_unlock(&mgmt_chan_list_lock);
 810
 811	return 0;
 812}
 813EXPORT_SYMBOL(hci_mgmt_chan_register);
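
/* A minimal sketch of how a provider could use the registration API
 * above. The handler table layout follows struct hci_mgmt_handler;
 * the command function and tables below are hypothetical examples,
 * not the real tables from mgmt.c.
 */
#if 0	/* illustration only */
static int example_no_hdev_cmd(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* Called with hdev == NULL because of HCI_MGMT_NO_HDEV below */
	return 0;
}

static const struct hci_mgmt_handler example_handlers[] = {
	[1] = { example_no_hdev_cmd, 0, HCI_MGMT_NO_HDEV |
					HCI_MGMT_UNTRUSTED },
};

static struct hci_mgmt_chan example_chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(example_handlers),
	.handlers	= example_handlers,
};
#endif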
 814
 815void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 816{
 817	mutex_lock(&mgmt_chan_list_lock);
 818	list_del(&c->list);
 819	mutex_unlock(&mgmt_chan_list_lock);
 820}
 821EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 822
 823static int hci_sock_release(struct socket *sock)
 824{
 825	struct sock *sk = sock->sk;
 826	struct hci_dev *hdev;
 827	struct sk_buff *skb;
 828
 829	BT_DBG("sock %p sk %p", sock, sk);
 830
 831	if (!sk)
 832		return 0;
 833
 834	hdev = hci_pi(sk)->hdev;
 835
 836	switch (hci_pi(sk)->channel) {
 837	case HCI_CHANNEL_MONITOR:
 838		atomic_dec(&monitor_promisc);
 839		break;
 840	case HCI_CHANNEL_RAW:
 841	case HCI_CHANNEL_USER:
 842	case HCI_CHANNEL_CONTROL:
 843		/* Send event to monitor */
 844		skb = create_monitor_ctrl_close(sk);
 845		if (skb) {
 846			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 847					    HCI_SOCK_TRUSTED, NULL);
 848			kfree_skb(skb);
 849		}
 850
 851		hci_sock_free_cookie(sk);
 852		break;
 853	}
 854
 855	bt_sock_unlink(&hci_sk_list, sk);
 856
 857	if (hdev) {
 858		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 859			/* When releasing a user channel exclusive access,
 860			 * call hci_dev_do_close directly instead of calling
 861			 * hci_dev_close to ensure the exclusive access will
 862			 * be released and the controller brought back down.
 863			 *
 864			 * The checking of HCI_AUTO_OFF is not needed in this
 865			 * case since it will have been cleared already when
 866			 * opening the user channel.
 867			 */
 868			hci_dev_do_close(hdev);
 869			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 870			mgmt_index_added(hdev);
 871		}
 872
 873		atomic_dec(&hdev->promisc);
 874		hci_dev_put(hdev);
 875	}
 876
 877	sock_orphan(sk);
 878
 879	skb_queue_purge(&sk->sk_receive_queue);
 880	skb_queue_purge(&sk->sk_write_queue);
 881
 882	sock_put(sk);
 883	return 0;
 884}
 885
 886static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 887{
 888	bdaddr_t bdaddr;
 889	int err;
 890
 891	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 892		return -EFAULT;
 893
 894	hci_dev_lock(hdev);
 895
 896	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 897
 898	hci_dev_unlock(hdev);
 899
 900	return err;
 901}
 902
 903static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 904{
 905	bdaddr_t bdaddr;
 906	int err;
 907
 908	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 909		return -EFAULT;
 910
 911	hci_dev_lock(hdev);
 912
 913	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 914
 915	hci_dev_unlock(hdev);
 916
 917	return err;
 918}
 919
 920/* Ioctls that require bound socket */
 921static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 922				unsigned long arg)
 923{
 924	struct hci_dev *hdev = hci_pi(sk)->hdev;
 925
 926	if (!hdev)
 927		return -EBADFD;
 928
 929	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 930		return -EBUSY;
 931
 932	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 933		return -EOPNOTSUPP;
 934
 935	if (hdev->dev_type != HCI_PRIMARY)
 936		return -EOPNOTSUPP;
 937
 938	switch (cmd) {
 939	case HCISETRAW:
 940		if (!capable(CAP_NET_ADMIN))
 941			return -EPERM;
 942		return -EOPNOTSUPP;
 943
 944	case HCIGETCONNINFO:
 945		return hci_get_conn_info(hdev, (void __user *)arg);
 946
 947	case HCIGETAUTHINFO:
 948		return hci_get_auth_info(hdev, (void __user *)arg);
 949
 950	case HCIBLOCKADDR:
 951		if (!capable(CAP_NET_ADMIN))
 952			return -EPERM;
 953		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 954
 955	case HCIUNBLOCKADDR:
 956		if (!capable(CAP_NET_ADMIN))
 957			return -EPERM;
 958		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 959	}
 960
 961	return -ENOIOCTLCMD;
 962}
 963
 964static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 965			  unsigned long arg)
 966{
 967	void __user *argp = (void __user *)arg;
 968	struct sock *sk = sock->sk;
 969	int err;
 970
 971	BT_DBG("cmd %x arg %lx", cmd, arg);
 972
 973	lock_sock(sk);
 974
 975	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 976		err = -EBADFD;
 977		goto done;
 978	}
 979
 980	/* When calling an ioctl on an unbound raw socket, ensure that
 981	 * the monitor gets informed. The resulting event is only sent
 982	 * once: the check below succeeds only when the cookie has just
 983	 * been generated, and a socket cookie is only ever generated
 984	 * once for the lifetime of a given socket.
 985	 */
 986	if (hci_sock_gen_cookie(sk)) {
 987		struct sk_buff *skb;
 988
 989		if (capable(CAP_NET_ADMIN))
 990			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 991
 992		/* Send event to monitor */
 993		skb = create_monitor_ctrl_open(sk);
 994		if (skb) {
 995			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 996					    HCI_SOCK_TRUSTED, NULL);
 997			kfree_skb(skb);
 998		}
 999	}
1000
1001	release_sock(sk);
1002
1003	switch (cmd) {
1004	case HCIGETDEVLIST:
1005		return hci_get_dev_list(argp);
1006
1007	case HCIGETDEVINFO:
1008		return hci_get_dev_info(argp);
1009
1010	case HCIGETCONNLIST:
1011		return hci_get_conn_list(argp);
1012
1013	case HCIDEVUP:
1014		if (!capable(CAP_NET_ADMIN))
1015			return -EPERM;
1016		return hci_dev_open(arg);
1017
1018	case HCIDEVDOWN:
1019		if (!capable(CAP_NET_ADMIN))
1020			return -EPERM;
1021		return hci_dev_close(arg);
1022
1023	case HCIDEVRESET:
1024		if (!capable(CAP_NET_ADMIN))
1025			return -EPERM;
1026		return hci_dev_reset(arg);
1027
1028	case HCIDEVRESTAT:
1029		if (!capable(CAP_NET_ADMIN))
1030			return -EPERM;
1031		return hci_dev_reset_stat(arg);
1032
1033	case HCISETSCAN:
1034	case HCISETAUTH:
1035	case HCISETENCRYPT:
1036	case HCISETPTYPE:
1037	case HCISETLINKPOL:
1038	case HCISETLINKMODE:
1039	case HCISETACLMTU:
1040	case HCISETSCOMTU:
1041		if (!capable(CAP_NET_ADMIN))
1042			return -EPERM;
1043		return hci_dev_cmd(cmd, argp);
1044
1045	case HCIINQUIRY:
1046		return hci_inquiry(argp);
1047	}
1048
1049	lock_sock(sk);
1050
1051	err = hci_sock_bound_ioctl(sk, cmd, arg);
1052
1053done:
1054	release_sock(sk);
1055	return err;
1056}
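
/* Userspace sketch (illustration only): the ioctls above are reached
 * through a raw HCI socket, e.g. bringing up hci0, which requires
 * CAP_NET_ADMIN. This assumes the socket and ioctl constants from the
 * userspace copies of these headers:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (fd >= 0 && ioctl(fd, HCIDEVUP, 0) < 0)
 *		perror("HCIDEVUP");
 */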
1057
1058static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1059			 int addr_len)
1060{
1061	struct sockaddr_hci haddr;
1062	struct sock *sk = sock->sk;
1063	struct hci_dev *hdev = NULL;
1064	struct sk_buff *skb;
1065	int len, err = 0;
1066
1067	BT_DBG("sock %p sk %p", sock, sk);
1068
1069	if (!addr)
1070		return -EINVAL;
1071
1072	memset(&haddr, 0, sizeof(haddr));
1073	len = min_t(unsigned int, sizeof(haddr), addr_len);
1074	memcpy(&haddr, addr, len);
1075
1076	if (haddr.hci_family != AF_BLUETOOTH)
1077		return -EINVAL;
1078
1079	lock_sock(sk);
1080
1081	if (sk->sk_state == BT_BOUND) {
1082		err = -EALREADY;
1083		goto done;
1084	}
1085
1086	switch (haddr.hci_channel) {
1087	case HCI_CHANNEL_RAW:
1088		if (hci_pi(sk)->hdev) {
1089			err = -EALREADY;
1090			goto done;
1091		}
1092
1093		if (haddr.hci_dev != HCI_DEV_NONE) {
1094			hdev = hci_dev_get(haddr.hci_dev);
1095			if (!hdev) {
1096				err = -ENODEV;
1097				goto done;
1098			}
1099
1100			atomic_inc(&hdev->promisc);
1101		}
1102
1103		hci_pi(sk)->channel = haddr.hci_channel;
1104
1105		if (!hci_sock_gen_cookie(sk)) {
1106			/* If a cookie has already been assigned, then an ioctl
1107			 * has already been issued against the unbound socket,
1108			 * which triggered an open notification. Send a close
1109			 * notification first to allow a clean transition to
1110			 * the bound state.
1111			 */
1112			skb = create_monitor_ctrl_close(sk);
1113			if (skb) {
1114				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1115						    HCI_SOCK_TRUSTED, NULL);
1116				kfree_skb(skb);
1117			}
1118		}
1119
1120		if (capable(CAP_NET_ADMIN))
1121			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1122
1123		hci_pi(sk)->hdev = hdev;
1124
1125		/* Send event to monitor */
1126		skb = create_monitor_ctrl_open(sk);
1127		if (skb) {
1128			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1129					    HCI_SOCK_TRUSTED, NULL);
1130			kfree_skb(skb);
1131		}
1132		break;
1133
1134	case HCI_CHANNEL_USER:
1135		if (hci_pi(sk)->hdev) {
1136			err = -EALREADY;
1137			goto done;
1138		}
1139
1140		if (haddr.hci_dev == HCI_DEV_NONE) {
1141			err = -EINVAL;
1142			goto done;
1143		}
1144
1145		if (!capable(CAP_NET_ADMIN)) {
1146			err = -EPERM;
1147			goto done;
1148		}
1149
1150		hdev = hci_dev_get(haddr.hci_dev);
1151		if (!hdev) {
1152			err = -ENODEV;
1153			goto done;
1154		}
1155
1156		if (test_bit(HCI_INIT, &hdev->flags) ||
1157		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1158		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1159		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1160		     test_bit(HCI_UP, &hdev->flags))) {
1161			err = -EBUSY;
1162			hci_dev_put(hdev);
1163			goto done;
1164		}
1165
1166		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1167			err = -EUSERS;
1168			hci_dev_put(hdev);
1169			goto done;
1170		}
1171
1172		mgmt_index_removed(hdev);
1173
1174		err = hci_dev_open(hdev->id);
1175		if (err) {
1176			if (err == -EALREADY) {
1177				/* In case the transport is already up and
1178				 * running, clear the error here.
1179				 *
1180				 * This can happen when opening a user
1181				 * channel and HCI_AUTO_OFF grace period
1182				 * is still active.
1183				 */
1184				err = 0;
1185			} else {
1186				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1187				mgmt_index_added(hdev);
1188				hci_dev_put(hdev);
1189				goto done;
1190			}
1191		}
1192
1193		hci_pi(sk)->channel = haddr.hci_channel;
1194
1195		if (!hci_sock_gen_cookie(sk)) {
1196			/* In the case when a cookie has already been assigned,
1197			 * this socket will transition from a raw socket into
1198			 * a user channel socket. For a clean transition, send
1199			 * the close notification first.
1200			 */
1201			skb = create_monitor_ctrl_close(sk);
1202			if (skb) {
1203				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1204						    HCI_SOCK_TRUSTED, NULL);
1205				kfree_skb(skb);
1206			}
1207		}
1208
1209		/* The user channel is restricted to CAP_NET_ADMIN
1210		 * capabilities and is therefore implicitly trusted.
1211		 */
1212		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1213
1214		hci_pi(sk)->hdev = hdev;
1215
1216		/* Send event to monitor */
1217		skb = create_monitor_ctrl_open(sk);
1218		if (skb) {
1219			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1220					    HCI_SOCK_TRUSTED, NULL);
1221			kfree_skb(skb);
1222		}
1223
1224		atomic_inc(&hdev->promisc);
1225		break;
1226
1227	case HCI_CHANNEL_MONITOR:
1228		if (haddr.hci_dev != HCI_DEV_NONE) {
1229			err = -EINVAL;
1230			goto done;
1231		}
1232
1233		if (!capable(CAP_NET_RAW)) {
1234			err = -EPERM;
1235			goto done;
1236		}
1237
1238		hci_pi(sk)->channel = haddr.hci_channel;
1239
1240		/* The monitor interface is restricted to CAP_NET_RAW
1241		 * capabilities and is therefore implicitly trusted.
1242		 */
1243		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1244
1245		send_monitor_note(sk, "Linux version %s (%s)",
1246				  init_utsname()->release,
1247				  init_utsname()->machine);
1248		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1249				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1250		send_monitor_replay(sk);
1251		send_monitor_control_replay(sk);
1252
1253		atomic_inc(&monitor_promisc);
1254		break;
1255
1256	case HCI_CHANNEL_LOGGING:
1257		if (haddr.hci_dev != HCI_DEV_NONE) {
1258			err = -EINVAL;
1259			goto done;
1260		}
1261
1262		if (!capable(CAP_NET_ADMIN)) {
1263			err = -EPERM;
1264			goto done;
1265		}
1266
1267		hci_pi(sk)->channel = haddr.hci_channel;
1268		break;
1269
1270	default:
1271		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1272			err = -EINVAL;
1273			goto done;
1274		}
1275
1276		if (haddr.hci_dev != HCI_DEV_NONE) {
1277			err = -EINVAL;
1278			goto done;
1279		}
1280
1281		/* Users with CAP_NET_ADMIN capabilities are allowed
1282		 * access to all management commands and events. For
1283		 * untrusted users the interface is restricted and only
1284		 * events marked as safe for untrusted users are sent.
1285		 */
1286		if (capable(CAP_NET_ADMIN))
1287			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1288
1289		hci_pi(sk)->channel = haddr.hci_channel;
1290
1291		/* At the moment the index and unconfigured index events
1292		 * are enabled unconditionally. Setting them on each
1293		 * socket when binding keeps this functionality. They
1294		 * might however be cleared later, in which case sending
1295		 * these events is intentionally disabled.
1296		 *
1297		 * This also enables generic events that are safe to be
1298		 * received by untrusted users. Examples of such events
1299		 * are changes to settings, class of device, name etc.
1300		 */
1301		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1302			if (!hci_sock_gen_cookie(sk)) {
1303				/* In the case when a cookie has already been
1304				 * assigned, this socket will transition from
1305				 * a raw socket into a control socket. To
1306				 * allow for a clean transition, send the
1307				 * close notification first.
1308				 */
1309				skb = create_monitor_ctrl_close(sk);
1310				if (skb) {
1311					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1312							    HCI_SOCK_TRUSTED, NULL);
1313					kfree_skb(skb);
1314				}
1315			}
1316
1317			/* Send event to monitor */
1318			skb = create_monitor_ctrl_open(sk);
1319			if (skb) {
1320				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1321						    HCI_SOCK_TRUSTED, NULL);
1322				kfree_skb(skb);
1323			}
1324
1325			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1326			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1327			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1328			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1329			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1330			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1331		}
1332		break;
1333	}
1334
1335	sk->sk_state = BT_BOUND;
1336
1337done:
1338	release_sock(sk);
1339	return err;
1340}
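
/* Userspace sketch (illustration only, not part of this file): binding
 * a monitor channel socket, roughly what btmon does before reading
 * HCI_MON_* frames. It assumes the sockaddr_hci layout and channel
 * constants from the userspace copies of these headers.
 */
#if 0
static int open_monitor_socket(void)
{
	struct sockaddr_hci addr;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	/* Requires CAP_NET_RAW, see the HCI_CHANNEL_MONITOR case above */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}
#endif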
1341
1342static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1343			    int peer)
1344{
1345	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1346	struct sock *sk = sock->sk;
1347	struct hci_dev *hdev;
1348	int err = 0;
1349
1350	BT_DBG("sock %p sk %p", sock, sk);
1351
1352	if (peer)
1353		return -EOPNOTSUPP;
1354
1355	lock_sock(sk);
1356
1357	hdev = hci_pi(sk)->hdev;
1358	if (!hdev) {
1359		err = -EBADFD;
1360		goto done;
1361	}
1362
1363	haddr->hci_family = AF_BLUETOOTH;
1364	haddr->hci_dev    = hdev->id;
1365	haddr->hci_channel = hci_pi(sk)->channel;
1366	err = sizeof(*haddr);
1367
1368done:
1369	release_sock(sk);
1370	return err;
1371}
1372
1373static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1374			  struct sk_buff *skb)
1375{
1376	__u32 mask = hci_pi(sk)->cmsg_mask;
1377
1378	if (mask & HCI_CMSG_DIR) {
1379		int incoming = bt_cb(skb)->incoming;
1380		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1381			 &incoming);
1382	}
1383
1384	if (mask & HCI_CMSG_TSTAMP) {
1385#ifdef CONFIG_COMPAT
1386		struct compat_timeval ctv;
1387#endif
1388		struct timeval tv;
1389		void *data;
1390		int len;
1391
1392		skb_get_timestamp(skb, &tv);
1393
1394		data = &tv;
1395		len = sizeof(tv);
1396#ifdef CONFIG_COMPAT
1397		if (!COMPAT_USE_64BIT_TIME &&
1398		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1399			ctv.tv_sec = tv.tv_sec;
1400			ctv.tv_usec = tv.tv_usec;
1401			data = &ctv;
1402			len = sizeof(ctv);
1403		}
1404#endif
1405
1406		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1407	}
1408}
1409
1410static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1411			    size_t len, int flags)
1412{
1413	int noblock = flags & MSG_DONTWAIT;
1414	struct sock *sk = sock->sk;
1415	struct sk_buff *skb;
1416	int copied, err;
1417	unsigned int skblen;
1418
1419	BT_DBG("sock %p, sk %p", sock, sk);
1420
1421	if (flags & MSG_OOB)
1422		return -EOPNOTSUPP;
1423
1424	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1425		return -EOPNOTSUPP;
1426
1427	if (sk->sk_state == BT_CLOSED)
1428		return 0;
1429
1430	skb = skb_recv_datagram(sk, flags, noblock, &err);
1431	if (!skb)
1432		return err;
1433
1434	skblen = skb->len;
1435	copied = skb->len;
1436	if (len < copied) {
1437		msg->msg_flags |= MSG_TRUNC;
1438		copied = len;
1439	}
1440
1441	skb_reset_transport_header(skb);
1442	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1443
1444	switch (hci_pi(sk)->channel) {
1445	case HCI_CHANNEL_RAW:
1446		hci_sock_cmsg(sk, msg, skb);
1447		break;
1448	case HCI_CHANNEL_USER:
1449	case HCI_CHANNEL_MONITOR:
1450		sock_recv_timestamp(msg, sk, skb);
1451		break;
1452	default:
1453		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1454			sock_recv_timestamp(msg, sk, skb);
1455		break;
1456	}
1457
1458	skb_free_datagram(sk, skb);
1459
1460	if (flags & MSG_TRUNC)
1461		copied = skblen;
1462
1463	return err ? : copied;
1464}
1465
1466static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1467			struct msghdr *msg, size_t msglen)
1468{
1469	void *buf;
1470	u8 *cp;
1471	struct mgmt_hdr *hdr;
1472	u16 opcode, index, len;
1473	struct hci_dev *hdev = NULL;
1474	const struct hci_mgmt_handler *handler;
1475	bool var_len, no_hdev;
1476	int err;
1477
1478	BT_DBG("got %zu bytes", msglen);
1479
1480	if (msglen < sizeof(*hdr))
1481		return -EINVAL;
1482
1483	buf = kmalloc(msglen, GFP_KERNEL);
1484	if (!buf)
1485		return -ENOMEM;
1486
1487	if (memcpy_from_msg(buf, msg, msglen)) {
1488		err = -EFAULT;
1489		goto done;
1490	}
1491
1492	hdr = buf;
1493	opcode = __le16_to_cpu(hdr->opcode);
1494	index = __le16_to_cpu(hdr->index);
1495	len = __le16_to_cpu(hdr->len);
1496
1497	if (len != msglen - sizeof(*hdr)) {
1498		err = -EINVAL;
1499		goto done;
1500	}
1501
1502	if (chan->channel == HCI_CHANNEL_CONTROL) {
1503		struct sk_buff *skb;
1504
1505		/* Send event to monitor */
1506		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1507						  buf + sizeof(*hdr));
1508		if (skb) {
1509			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1510					    HCI_SOCK_TRUSTED, NULL);
1511			kfree_skb(skb);
1512		}
1513	}
1514
1515	if (opcode >= chan->handler_count ||
1516	    chan->handlers[opcode].func == NULL) {
1517		BT_DBG("Unknown op %u", opcode);
1518		err = mgmt_cmd_status(sk, index, opcode,
1519				      MGMT_STATUS_UNKNOWN_COMMAND);
1520		goto done;
1521	}
1522
1523	handler = &chan->handlers[opcode];
1524
1525	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1526	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1527		err = mgmt_cmd_status(sk, index, opcode,
1528				      MGMT_STATUS_PERMISSION_DENIED);
1529		goto done;
1530	}
1531
1532	if (index != MGMT_INDEX_NONE) {
1533		hdev = hci_dev_get(index);
1534		if (!hdev) {
1535			err = mgmt_cmd_status(sk, index, opcode,
1536					      MGMT_STATUS_INVALID_INDEX);
1537			goto done;
1538		}
1539
1540		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1541		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1542		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1543			err = mgmt_cmd_status(sk, index, opcode,
1544					      MGMT_STATUS_INVALID_INDEX);
1545			goto done;
1546		}
1547
1548		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1549		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1550			err = mgmt_cmd_status(sk, index, opcode,
1551					      MGMT_STATUS_INVALID_INDEX);
1552			goto done;
1553		}
1554	}
1555
1556	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1557	if (no_hdev != !hdev) {
1558		err = mgmt_cmd_status(sk, index, opcode,
1559				      MGMT_STATUS_INVALID_INDEX);
1560		goto done;
1561	}
1562
1563	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1564	if ((var_len && len < handler->data_len) ||
1565	    (!var_len && len != handler->data_len)) {
1566		err = mgmt_cmd_status(sk, index, opcode,
1567				      MGMT_STATUS_INVALID_PARAMS);
1568		goto done;
1569	}
1570
1571	if (hdev && chan->hdev_init)
1572		chan->hdev_init(sk, hdev);
1573
1574	cp = buf + sizeof(*hdr);
1575
1576	err = handler->func(sk, hdev, cp, len);
1577	if (err < 0)
1578		goto done;
1579
1580	err = msglen;
1581
1582done:
1583	if (hdev)
1584		hci_dev_put(hdev);
1585
1586	kfree(buf);
1587	return err;
1588}
1589
1590static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1591{
1592	struct hci_mon_hdr *hdr;
1593	struct sk_buff *skb;
1594	struct hci_dev *hdev;
1595	u16 index;
1596	int err;
1597
1598	/* The logging frame consists at minimum of the standard header,
1599	 * the priority byte, the ident length byte and at least one string
1600	 * terminator NUL byte. Anything shorter is an invalid packet.
1601	 */
1602	if (len < sizeof(*hdr) + 3)
1603		return -EINVAL;
1604
1605	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1606	if (!skb)
1607		return err;
1608
1609	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1610		err = -EFAULT;
1611		goto drop;
1612	}
1613
1614	hdr = (void *)skb->data;
1615
1616	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1617		err = -EINVAL;
1618		goto drop;
1619	}
1620
1621	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1622		__u8 priority = skb->data[sizeof(*hdr)];
1623		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1624
1625		/* Only priorities 0-7 are valid; any other value results
1626		 * in an invalid packet.
1627		 *
1628		 * The priority byte is followed by an ident length byte and
1629		 * the NUL terminated ident string. Check that the ident
1630		 * length does not overflow the packet and that the ident
1631		 * string itself is NUL terminated. In case the ident length
1632		 * is zero, the terminator check lands on the length byte
1633		 * itself, which then doubles as the NUL terminator.
1634		 *
1635		 * The message follows the ident string (if present) and
1636		 * must be NUL terminated. Otherwise it is not a valid packet.
1637		 */
1638		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1639		    ident_len > len - sizeof(*hdr) - 3 ||
1640		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1641			err = -EINVAL;
1642			goto drop;
1643		}
1644	} else {
1645		err = -EINVAL;
1646		goto drop;
1647	}
1648
1649	index = __le16_to_cpu(hdr->index);
1650
1651	if (index != MGMT_INDEX_NONE) {
1652		hdev = hci_dev_get(index);
1653		if (!hdev) {
1654			err = -ENODEV;
1655			goto drop;
1656		}
1657	} else {
1658		hdev = NULL;
1659	}
1660
1661	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1662
1663	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1664	err = len;
1665
1666	if (hdev)
1667		hci_dev_put(hdev);
1668
1669drop:
1670	kfree_skb(skb);
1671	return err;
1672}
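
/* Illustration (assumed example values) of a logging frame that passes
 * the checks above: a priority 6 message "hi" with ident "ex". The
 * ident length of 3 includes the NUL terminator.
 *
 *	opcode 0x0000, index, len = 8	(struct hci_mon_hdr, 6 bytes)
 *	0x06				(priority)
 *	0x03				(ident length, incl. NUL)
 *	'e' 'x' 0x00			(ident string)
 *	'h' 'i' 0x00			(message string)
 *
 * Such a frame is sent through a socket bound to HCI_CHANNEL_LOGGING.
 */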
1673
1674static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1675			    size_t len)
1676{
1677	struct sock *sk = sock->sk;
1678	struct hci_mgmt_chan *chan;
1679	struct hci_dev *hdev;
1680	struct sk_buff *skb;
1681	int err;
1682
1683	BT_DBG("sock %p sk %p", sock, sk);
1684
1685	if (msg->msg_flags & MSG_OOB)
1686		return -EOPNOTSUPP;
1687
1688	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1689			       MSG_CMSG_COMPAT))
1690		return -EINVAL;
1691
1692	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1693		return -EINVAL;
1694
1695	lock_sock(sk);
1696
1697	switch (hci_pi(sk)->channel) {
1698	case HCI_CHANNEL_RAW:
1699	case HCI_CHANNEL_USER:
1700		break;
1701	case HCI_CHANNEL_MONITOR:
1702		err = -EOPNOTSUPP;
1703		goto done;
1704	case HCI_CHANNEL_LOGGING:
1705		err = hci_logging_frame(sk, msg, len);
1706		goto done;
1707	default:
1708		mutex_lock(&mgmt_chan_list_lock);
1709		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1710		if (chan)
1711			err = hci_mgmt_cmd(chan, sk, msg, len);
1712		else
1713			err = -EINVAL;
1714
1715		mutex_unlock(&mgmt_chan_list_lock);
1716		goto done;
1717	}
1718
1719	hdev = hci_pi(sk)->hdev;
1720	if (!hdev) {
1721		err = -EBADFD;
1722		goto done;
1723	}
1724
1725	if (!test_bit(HCI_UP, &hdev->flags)) {
1726		err = -ENETDOWN;
1727		goto done;
1728	}
1729
1730	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1731	if (!skb)
1732		goto done;
1733
1734	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1735		err = -EFAULT;
1736		goto drop;
1737	}
1738
1739	hci_skb_pkt_type(skb) = skb->data[0];
1740	skb_pull(skb, 1);
1741
1742	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1743		/* No permission check is needed for user channel
1744		 * since that gets enforced when binding the socket.
1745		 *
1746		 * However check that the packet type is valid.
1747		 */
1748		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1749		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1750		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1751			err = -EINVAL;
1752			goto drop;
1753		}
1754
1755		skb_queue_tail(&hdev->raw_q, skb);
1756		queue_work(hdev->workqueue, &hdev->tx_work);
1757	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1758		u16 opcode = get_unaligned_le16(skb->data);
1759		u16 ogf = hci_opcode_ogf(opcode);
1760		u16 ocf = hci_opcode_ocf(opcode);
1761
1762		if (((ogf > HCI_SFLT_MAX_OGF) ||
1763		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1764				   &hci_sec_filter.ocf_mask[ogf])) &&
1765		    !capable(CAP_NET_RAW)) {
1766			err = -EPERM;
1767			goto drop;
1768		}
1769
1770		/* Since the opcode has already been extracted here, store
1771		 * a copy of the value for later use by the drivers.
1772		 */
1773		hci_skb_opcode(skb) = opcode;
1774
1775		if (ogf == 0x3f) {
1776			skb_queue_tail(&hdev->raw_q, skb);
1777			queue_work(hdev->workqueue, &hdev->tx_work);
1778		} else {
1779			/* Stand-alone HCI commands must be flagged as
1780			 * single-command requests.
1781			 */
1782			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1783
1784			skb_queue_tail(&hdev->cmd_q, skb);
1785			queue_work(hdev->workqueue, &hdev->cmd_work);
1786		}
1787	} else {
1788		if (!capable(CAP_NET_RAW)) {
1789			err = -EPERM;
1790			goto drop;
1791		}
1792
1793		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1794		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1795			err = -EINVAL;
1796			goto drop;
1797		}
1798
1799		skb_queue_tail(&hdev->raw_q, skb);
1800		queue_work(hdev->workqueue, &hdev->tx_work);
1801	}
1802
1803	err = len;
1804
1805done:
1806	release_sock(sk);
1807	return err;
1808
1809drop:
1810	kfree_skb(skb);
1811	goto done;
1812}
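
/* Userspace sketch (illustration only): sending an HCI command through
 * a bound raw or user channel socket. The first byte is the packet type
 * (HCI_COMMAND_PKT), followed by the little-endian opcode and the
 * parameter total length. HCI_Reset is OGF 0x03, OCF 0x0003, hence
 * opcode 0x0c03:
 *
 *	static const unsigned char hci_reset[] = { 0x01, 0x03, 0x0c, 0x00 };
 *
 *	write(fd, hci_reset, sizeof(hci_reset));
 *
 * Note that hci_sock_sendmsg() rejects frames shorter than 4 bytes.
 */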
1813
1814static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1815			       char __user *optval, unsigned int len)
1816{
1817	struct hci_ufilter uf = { .opcode = 0 };
1818	struct sock *sk = sock->sk;
1819	int err = 0, opt = 0;
1820
1821	BT_DBG("sk %p, opt %d", sk, optname);
1822
1823	if (level != SOL_HCI)
1824		return -ENOPROTOOPT;
1825
1826	lock_sock(sk);
1827
1828	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1829		err = -EBADFD;
1830		goto done;
1831	}
1832
1833	switch (optname) {
1834	case HCI_DATA_DIR:
1835		if (get_user(opt, (int __user *)optval)) {
1836			err = -EFAULT;
1837			break;
1838		}
1839
1840		if (opt)
1841			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1842		else
1843			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1844		break;
1845
1846	case HCI_TIME_STAMP:
1847		if (get_user(opt, (int __user *)optval)) {
1848			err = -EFAULT;
1849			break;
1850		}
1851
1852		if (opt)
1853			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1854		else
1855			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1856		break;
1857
1858	case HCI_FILTER:
1859		{
1860			struct hci_filter *f = &hci_pi(sk)->filter;
1861
1862			uf.type_mask = f->type_mask;
1863			uf.opcode    = f->opcode;
1864			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1865			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1866		}
1867
1868		len = min_t(unsigned int, len, sizeof(uf));
1869		if (copy_from_user(&uf, optval, len)) {
1870			err = -EFAULT;
1871			break;
1872		}
1873
1874		if (!capable(CAP_NET_RAW)) {
1875			uf.type_mask &= hci_sec_filter.type_mask;
1876			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1877			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1878		}
1879
1880		{
1881			struct hci_filter *f = &hci_pi(sk)->filter;
1882
1883			f->type_mask = uf.type_mask;
1884			f->opcode    = uf.opcode;
1885			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1886			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1887		}
1888		break;
1889
1890	default:
1891		err = -ENOPROTOOPT;
1892		break;
1893	}
1894
1895done:
1896	release_sock(sk);
1897	return err;
1898}
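
/* Userspace sketch (illustration only): installing a socket filter via
 * the HCI_FILTER option handled above, assuming userspace declares a
 * structure matching the hci_ufilter layout. This example passes HCI
 * event packets (type 0x04) and the Command Complete event (0x0e):
 *
 *	struct hci_ufilter uf;
 *
 *	memset(&uf, 0, sizeof(uf));
 *	uf.type_mask = 1 << HCI_EVENT_PKT;
 *	uf.event_mask[0] = 1 << 0x0e;
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf));
 */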
1899
1900static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1901			       char __user *optval, int __user *optlen)
1902{
1903	struct hci_ufilter uf;
1904	struct sock *sk = sock->sk;
1905	int len, opt, err = 0;
1906
1907	BT_DBG("sk %p, opt %d", sk, optname);
1908
1909	if (level != SOL_HCI)
1910		return -ENOPROTOOPT;
1911
1912	if (get_user(len, optlen))
1913		return -EFAULT;
1914
1915	lock_sock(sk);
1916
1917	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1918		err = -EBADFD;
1919		goto done;
1920	}
1921
1922	switch (optname) {
1923	case HCI_DATA_DIR:
1924		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1925			opt = 1;
1926		else
1927			opt = 0;
1928
1929		if (put_user(opt, optval))
1930			err = -EFAULT;
1931		break;
1932
1933	case HCI_TIME_STAMP:
1934		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1935			opt = 1;
1936		else
1937			opt = 0;
1938
1939		if (put_user(opt, optval))
1940			err = -EFAULT;
1941		break;
1942
1943	case HCI_FILTER:
1944		{
1945			struct hci_filter *f = &hci_pi(sk)->filter;
1946
1947			memset(&uf, 0, sizeof(uf));
1948			uf.type_mask = f->type_mask;
1949			uf.opcode    = f->opcode;
1950			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1951			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1952		}
1953
1954		len = min_t(unsigned int, len, sizeof(uf));
1955		if (copy_to_user(optval, &uf, len))
1956			err = -EFAULT;
1957		break;
1958
1959	default:
1960		err = -ENOPROTOOPT;
1961		break;
1962	}
1963
1964done:
1965	release_sock(sk);
1966	return err;
1967}
1968
1969static const struct proto_ops hci_sock_ops = {
1970	.family		= PF_BLUETOOTH,
1971	.owner		= THIS_MODULE,
1972	.release	= hci_sock_release,
1973	.bind		= hci_sock_bind,
1974	.getname	= hci_sock_getname,
1975	.sendmsg	= hci_sock_sendmsg,
1976	.recvmsg	= hci_sock_recvmsg,
1977	.ioctl		= hci_sock_ioctl,
1978	.poll		= datagram_poll,
1979	.listen		= sock_no_listen,
1980	.shutdown	= sock_no_shutdown,
1981	.setsockopt	= hci_sock_setsockopt,
1982	.getsockopt	= hci_sock_getsockopt,
1983	.connect	= sock_no_connect,
1984	.socketpair	= sock_no_socketpair,
1985	.accept		= sock_no_accept,
1986	.mmap		= sock_no_mmap
1987};
1988
1989static struct proto hci_sk_proto = {
1990	.name		= "HCI",
1991	.owner		= THIS_MODULE,
1992	.obj_size	= sizeof(struct hci_pinfo)
1993};
1994
1995static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1996			   int kern)
1997{
1998	struct sock *sk;
1999
2000	BT_DBG("sock %p", sock);
2001
2002	if (sock->type != SOCK_RAW)
2003		return -ESOCKTNOSUPPORT;
2004
2005	sock->ops = &hci_sock_ops;
2006
2007	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2008	if (!sk)
2009		return -ENOMEM;
2010
2011	sock_init_data(sock, sk);
2012
2013	sock_reset_flag(sk, SOCK_ZAPPED);
2014
2015	sk->sk_protocol = protocol;
2016
2017	sock->state = SS_UNCONNECTED;
2018	sk->sk_state = BT_OPEN;
2019
2020	bt_sock_link(&hci_sk_list, sk);
2021	return 0;
2022}
2023
2024static const struct net_proto_family hci_sock_family_ops = {
2025	.family	= PF_BLUETOOTH,
2026	.owner	= THIS_MODULE,
2027	.create	= hci_sock_create,
2028};
2029
2030int __init hci_sock_init(void)
2031{
2032	int err;
2033
2034	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2035
2036	err = proto_register(&hci_sk_proto, 0);
2037	if (err < 0)
2038		return err;
2039
2040	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2041	if (err < 0) {
2042		BT_ERR("HCI socket registration failed");
2043		goto error;
2044	}
2045
2046	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2047	if (err < 0) {
2048		BT_ERR("Failed to create HCI proc file");
2049		bt_sock_unregister(BTPROTO_HCI);
2050		goto error;
2051	}
2052
2053	BT_INFO("HCI socket layer initialized");
2054
2055	return 0;
2056
2057error:
2058	proto_unregister(&hci_sk_proto);
2059	return err;
2060}
2061
2062void hci_sock_cleanup(void)
2063{
2064	bt_procfs_cleanup(&init_net, "hci");
2065	bt_sock_unregister(BTPROTO_HCI);
2066	proto_unregister(&hci_sk_proto);
2067}