Linux Audio

Check our new training course

Loading...
v4.6
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
 
  29#include <asm/unaligned.h>
  30
  31#include <net/bluetooth/bluetooth.h>
  32#include <net/bluetooth/hci_core.h>
  33#include <net/bluetooth/hci_mon.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "mgmt_util.h"
  37
  38static LIST_HEAD(mgmt_chan_list);
  39static DEFINE_MUTEX(mgmt_chan_list_lock);
  40
 
 
  41static atomic_t monitor_promisc = ATOMIC_INIT(0);
  42
  43/* ----- HCI socket interface ----- */
  44
  45/* Socket info */
  46#define hci_pi(sk) ((struct hci_pinfo *) sk)
  47
/* Per-socket private state for an HCI socket. hci_pi() casts the
 * generic struct sock to this, so the bt_sock member must stay first.
 */
struct hci_pinfo {
	struct bt_sock    bt;        /* generic Bluetooth socket base; must be first */
	struct hci_dev    *hdev;     /* bound controller, NULL until bound */
	struct hci_filter filter;    /* per-socket packet/event filter (raw channel) */
	__u32             cmsg_mask; /* requested ancillary data (HCI_CMSG_*) */
	unsigned short    channel;   /* HCI_CHANNEL_* the socket is bound to */
	unsigned long     flags;     /* HCI_SOCK_* flag bits (atomic bitops) */
};
  56
 
 
 
 
 
 
 
 
 
 
 
  57void hci_sock_set_flag(struct sock *sk, int nr)
  58{
  59	set_bit(nr, &hci_pi(sk)->flags);
  60}
  61
  62void hci_sock_clear_flag(struct sock *sk, int nr)
  63{
  64	clear_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67int hci_sock_test_flag(struct sock *sk, int nr)
  68{
  69	return test_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72unsigned short hci_sock_get_channel(struct sock *sk)
  73{
  74	return hci_pi(sk)->channel;
  75}
  76
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  77static inline int hci_test_bit(int nr, const void *addr)
  78{
  79	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
  80}
  81
  82/* Security filter */
  83#define HCI_SFLT_MAX_OGF  5
  84
/* Bitmap-based security filter describing what an unprivileged raw
 * socket may send/receive: packet types, events, and per-OGF command
 * OCF sets.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types, one bit per type */
	__u32 event_mask[2];	/* allowed HCI events, 64-bit bitmap */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
  90
/* Default security filter for unprivileged raw sockets. The bitmaps
 * whitelist "safe" packet types, events and commands; everything else
 * requires CAP_NET_RAW. Values are opaque bit-per-opcode constants.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
 111
/* Global list of all open HCI sockets; traversal and modification are
 * guarded by the embedded rwlock.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
 115
 116static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 117{
 118	struct hci_filter *flt;
 119	int flt_type, flt_event;
 120
 121	/* Apply filter */
 122	flt = &hci_pi(sk)->filter;
 123
 124	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 125
 126	if (!test_bit(flt_type, &flt->type_mask))
 127		return true;
 128
 129	/* Extra filter for event packets only */
 130	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 131		return false;
 132
 133	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 134
 135	if (!hci_test_bit(flt_event, &flt->event_mask))
 136		return true;
 137
 138	/* Check filter only when opcode is set */
 139	if (!flt->opcode)
 140		return false;
 141
 142	if (flt_event == HCI_EV_CMD_COMPLETE &&
 143	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 144		return true;
 145
 146	if (flt_event == HCI_EV_CMD_STATUS &&
 147	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 148		return true;
 149
 150	return false;
 151}
 152
/* Send frame to RAW sockets: deliver @skb to every bound raw/user
 * channel socket attached to @hdev, applying per-channel and
 * per-socket filtering. Runs under the socket-list read lock; all
 * allocations are GFP_ATOMIC.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Raw channel carries only the four core packet types */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			/* Apply per-socket filter; true means drop */
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel sees only controller-to-host traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom, lazily, so
			 * nothing is allocated when no socket wants the frame.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		/* Each receiver gets its own clone of the shared copy */
		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
 215
 216/* Send frame to sockets with specific channel */
 217void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 218			 int flag, struct sock *skip_sk)
 219{
 220	struct sock *sk;
 221
 222	BT_DBG("channel %u len %d", channel, skb->len);
 223
 224	read_lock(&hci_sk_list.lock);
 225
 226	sk_for_each(sk, &hci_sk_list.head) {
 227		struct sk_buff *nskb;
 228
 229		/* Ignore socket without the flag set */
 230		if (!hci_sock_test_flag(sk, flag))
 231			continue;
 232
 233		/* Skip the original socket */
 234		if (sk == skip_sk)
 235			continue;
 236
 237		if (sk->sk_state != BT_BOUND)
 238			continue;
 239
 240		if (hci_pi(sk)->channel != channel)
 241			continue;
 242
 243		nskb = skb_clone(skb, GFP_ATOMIC);
 244		if (!nskb)
 245			continue;
 246
 247		if (sock_queue_rcv_skb(sk, nskb))
 248			kfree_skb(nskb);
 249	}
 250
 
 
 
 
 
 
 
 251	read_unlock(&hci_sk_list.lock);
 252}
 253
/* Send frame to monitor sockets: wrap @skb in a hci_mon_hdr whose
 * opcode encodes packet type and direction, then broadcast it on the
 * monitor channel. Cheap early-out when no monitor is listening.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Nobody is listening on the monitor channel */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction, for data) to a monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not forwarded to monitors */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
 307
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Build a monitor-channel skb describing device lifecycle @event for
 * @hdev (new/del index, index info, open/close). Returns NULL for
 * unknown events, allocation failure, or a SETUP event on a device
 * whose manufacturer is still unknown. Caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte name field in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* 0xffff means the manufacturer is not yet known; nothing
		 * useful to report, so suppress the event.
		 */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len excludes the header itself */
	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
 386
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Queue a printf-formatted SYSTEM_NOTE onto monitor socket @sk.
 * The message is sized with a first vsnprintf pass, then formatted
 * into the skb and explicitly NUL-terminated before the monitor
 * header is prepended. Allocation failure silently drops the note.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: compute the formatted length only */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass: format for real; buffer is exactly len+1 bytes */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
 418
/* Replay the current controller state to a newly bound monitor socket:
 * for each registered device emit NEW_INDEX, then OPEN_INDEX if the
 * device is running, then INDEX_INFO or a SETUP event depending on its
 * state. Held under the device-list read lock throughout.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that are not open get only the NEW_INDEX event */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Pick the event matching the device's current phase */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
 460
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Generate internal stack event: synthesize an HCI_EV_STACK_INTERNAL
 * event carrying @dlen bytes of @data and feed it to raw sockets as if
 * it arrived from the controller (marked incoming, event packet type).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the frame as controller-originated so raw sockets see it */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
 487
/* Propagate a device lifecycle @event: to monitors (if any), to raw
 * sockets as a stack-internal event for up/down transitions, and on
 * unregister detach every socket still bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device: clear the hdev pointer, wake
		 * the socket with EPIPE, and drop the per-socket device ref.
		 * NOTE(review): detachment uses bh_lock_sock_nested() under
		 * the list read lock; later upstream kernels reworked this
		 * locking against a use-after-free (CVE-2021-3573 area) —
		 * verify against the current tree before backporting.
		 */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
 533
 534static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 535{
 536	struct hci_mgmt_chan *c;
 537
 538	list_for_each_entry(c, &mgmt_chan_list, list) {
 539		if (c->channel == channel)
 540			return c;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 547{
 548	struct hci_mgmt_chan *c;
 549
 550	mutex_lock(&mgmt_chan_list_lock);
 551	c = __hci_mgmt_chan_find(channel);
 552	mutex_unlock(&mgmt_chan_list_lock);
 553
 554	return c;
 555}
 556
 557int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 558{
 559	if (c->channel < HCI_CHANNEL_CONTROL)
 560		return -EINVAL;
 561
 562	mutex_lock(&mgmt_chan_list_lock);
 563	if (__hci_mgmt_chan_find(c->channel)) {
 564		mutex_unlock(&mgmt_chan_list_lock);
 565		return -EALREADY;
 566	}
 567
 568	list_add_tail(&c->list, &mgmt_chan_list);
 569
 570	mutex_unlock(&mgmt_chan_list_lock);
 571
 572	return 0;
 573}
 574EXPORT_SYMBOL(hci_mgmt_chan_register);
 575
 576void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 577{
 578	mutex_lock(&mgmt_chan_list_lock);
 579	list_del(&c->list);
 580	mutex_unlock(&mgmt_chan_list_lock);
 581}
 582EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 583
/* Release an HCI socket: drop monitor promiscuity if this was a
 * monitor, unlink from the global list, give back user-channel
 * exclusive access (closing the controller), drop device refs, and
 * finally orphan and free the socket.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* One fewer listener on the monitor channel */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Balance the promisc ref taken at bind time */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
 629
 630static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 631{
 632	bdaddr_t bdaddr;
 633	int err;
 634
 635	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 636		return -EFAULT;
 637
 638	hci_dev_lock(hdev);
 639
 640	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 641
 642	hci_dev_unlock(hdev);
 643
 644	return err;
 645}
 646
 647static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 648{
 649	bdaddr_t bdaddr;
 650	int err;
 651
 652	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 653		return -EFAULT;
 654
 655	hci_dev_lock(hdev);
 656
 657	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 658
 659	hci_dev_unlock(hdev);
 660
 661	return err;
 662}
 663
 664/* Ioctls that require bound socket */
 665static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 666				unsigned long arg)
 667{
 668	struct hci_dev *hdev = hci_pi(sk)->hdev;
 669
 670	if (!hdev)
 671		return -EBADFD;
 672
 673	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 674		return -EBUSY;
 675
 676	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 677		return -EOPNOTSUPP;
 678
 679	if (hdev->dev_type != HCI_BREDR)
 680		return -EOPNOTSUPP;
 681
 682	switch (cmd) {
 683	case HCISETRAW:
 684		if (!capable(CAP_NET_ADMIN))
 685			return -EPERM;
 686		return -EOPNOTSUPP;
 687
 688	case HCIGETCONNINFO:
 689		return hci_get_conn_info(hdev, (void __user *)arg);
 690
 691	case HCIGETAUTHINFO:
 692		return hci_get_auth_info(hdev, (void __user *)arg);
 693
 694	case HCIBLOCKADDR:
 695		if (!capable(CAP_NET_ADMIN))
 696			return -EPERM;
 697		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 698
 699	case HCIUNBLOCKADDR:
 700		if (!capable(CAP_NET_ADMIN))
 701			return -EPERM;
 702		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 703	}
 704
 705	return -ENOIOCTLCMD;
 706}
 707
/* Top-level HCI socket ioctl. Global device commands run without the
 * socket lock; anything else is forwarded to hci_sock_bound_ioctl()
 * with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only raw channel sockets may use these ioctls */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* NOTE(review): the socket lock is dropped here before the global
	 * device ioctls below, so a concurrent bind() could change the
	 * socket state in between; later upstream kernels tightened this
	 * path — confirm against the current tree.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need a bound socket; retake the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
 780
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Bind an HCI socket to a channel (and, for raw/user channels, to a
 * controller). Enforces the per-channel capability requirements, sets
 * up the trusted/event flags, and moves the socket to BT_BOUND.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses are zero-padded */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds to all controllers; otherwise take a
		 * reference on the chosen one and mark it promiscuous.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		/* Exclusive userspace control of one controller; requires
		 * CAP_NET_ADMIN, a concrete device, and that the device is
		 * not currently initializing, configuring, or up.
		 */
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the management interface */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Roll back the exclusive-access claim */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		/* Read-only tracing of all HCI traffic; no device binding */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay the system notes and current controller state so
		 * the new monitor starts with a complete picture.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %s",
				  BT_SUBSYS_VERSION);
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		/* Write-only injection of logging frames; no device binding */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	default:
		/* Anything else must be a registered management channel */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	/* NOTE(review): the channel is recorded only here, after the
	 * per-channel setup above has already run — confirm no path
	 * observes the socket between setup and this assignment.
	 */
	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
 975
 976static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 977			    int *addr_len, int peer)
 978{
 979	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
 980	struct sock *sk = sock->sk;
 981	struct hci_dev *hdev;
 982	int err = 0;
 983
 984	BT_DBG("sock %p sk %p", sock, sk);
 985
 986	if (peer)
 987		return -EOPNOTSUPP;
 988
 989	lock_sock(sk);
 990
 991	hdev = hci_pi(sk)->hdev;
 992	if (!hdev) {
 993		err = -EBADFD;
 994		goto done;
 995	}
 996
 997	*addr_len = sizeof(*haddr);
 998	haddr->hci_family = AF_BLUETOOTH;
 999	haddr->hci_dev    = hdev->id;
1000	haddr->hci_channel= hci_pi(sk)->channel;
 
1001
1002done:
1003	release_sock(sk);
1004	return err;
1005}
1006
/* Attach requested ancillary data (direction and/or timestamp) to a
 * message being delivered on a raw channel socket, honoring compat
 * (32-bit) timeval layout when the receiver is a compat task.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* 1 = frame came from the controller, 0 = from the host */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* Compat tasks expect the 32-bit timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1043
/* Receive one queued frame from an HCI socket, truncating to @len and
 * attaching per-channel ancillary data. The logging channel is
 * write-only and rejects reads.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel only accepts writes */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Flag truncation when the frame exceeds the caller's buffer */
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Per-channel ancillary data */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
1094
/* Parse and dispatch one management command from @msg on channel
 * @chan: validate the header and length, enforce trust and per-device
 * state requirements, then invoke the registered handler. Returns the
 * consumed length on success or a negative errno; protocol-level
 * failures are reported to the socket via mgmt_cmd_status().
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header-declared length must match the actual payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only invoke explicitly untrusted commands */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or under user-channel control are
		 * not addressable through the management interface.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices accept only explicitly-marked commands */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands declare whether they take a device index or not */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands set a minimum, fixed ones an exact size */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1205
/* Validate a user-supplied logging frame (HCI_CHANNEL_LOGGING) and
 * broadcast it to all trusted monitor sockets as HCI_MON_USER_LOGGING.
 *
 * Returns the consumed length on success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* The header's length field must match the actual payload size */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		/* Only opcode 0x0000 is accepted on the logging channel */
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* A concrete controller index must refer to an existing device;
	 * the reference is only held to validate the index.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode before forwarding to the monitor channel */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1289
/* sendmsg() entry point for HCI sockets.
 *
 * Dispatches on the bound channel: RAW/USER frames are queued to the
 * controller, MONITOR is write-protected, LOGGING goes through
 * hci_logging_frame(), and any other channel is treated as a management
 * channel command. Returns the number of bytes consumed or a negative
 * errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum is packet-type byte plus a 3-byte HCI command header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are read-only */
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Everything else is handled by a registered mgmt channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	/* NOTE(review): hdev is used without checking for concurrent
	 * unregistration (no HCI_UNREGISTER test); newer kernels guard this
	 * path via hci_hdev_from_sock() - confirm whether this tree needs
	 * the same protection.
	 */
	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte carries the H:4 packet type indicator */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter whitelist require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor-specific commands bypass the command queue */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1428
/* setsockopt() for SOL_HCI options (HCI_DATA_DIR, HCI_TIME_STAMP,
 * HCI_FILTER). Only permitted on HCI_CHANNEL_RAW sockets.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-load uf with the current filter so a short copy from
		 * userspace keeps the existing values for the remainder.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets are restricted to the security
		 * filter's packet types and events.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1511
/* getsockopt() for SOL_HCI options; mirrors hci_sock_setsockopt().
 * Only permitted on HCI_CHANNEL_RAW sockets.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero uf first so no kernel stack bytes leak to
			 * userspace through struct padding.
			 */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1577
/* Socket operations for HCI sockets: datagram-style send/recv plus
 * bind/ioctl; connection-oriented operations are stubbed out.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1597
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1603
/* Create a new HCI socket (PF_BLUETOOTH/BTPROTO_HCI). Only SOCK_RAW is
 * supported. The socket starts unbound in BT_OPEN state and is linked
 * into the global hci_sk_list.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1632
/* Family descriptor registered with the Bluetooth core for BTPROTO_HCI */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1638
/* Register the HCI socket protocol, family and procfs entry.
 * On partial failure, everything registered so far is torn down again.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit into the generic sockaddr buffer */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1670
/* Tear down in reverse order of hci_sock_init(): procfs entry first,
 * then the socket family, then the protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
/* ===== Version boundary: the remainder of this capture is the same file as of Linux v5.14.15 ===== */
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26#include <linux/compat.h>
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
/* Per-socket state for HCI sockets; bt_sock must stay first so that
 * the hci_pi() cast from struct sock is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* RAW-channel receive filter */
	__u8              cmsg_mask;	/* HCI_CMSG_* ancillary data flags */
	unsigned short    channel;	/* HCI_CHANNEL_* the socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
	__u32             cookie;	/* monitor-tracing cookie, 0 if unset */
	char              comm[TASK_COMM_LEN];	/* opener's task name */
};
  61
/* Return the controller bound to @sk, or an ERR_PTR: -EBADFD when the
 * socket is unbound, -EPIPE when the controller is being unregistered.
 */
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
  72
/* Atomically set HCI_SOCK_* flag @nr on socket @sk */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
  77
/* Atomically clear HCI_SOCK_* flag @nr on socket @sk */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
  82
/* Test HCI_SOCK_* flag @nr on socket @sk; returns non-zero if set */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
  87
/* Return the HCI_CHANNEL_* the socket is bound to */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
  92
/* Return the socket's monitor-tracing cookie (0 if none assigned) */
u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
  97
/* Assign a monitor-tracing cookie to @sk if it does not have one yet,
 * capturing the current task name at the same time.
 *
 * Returns true if a new cookie was generated, false if one already
 * existed.
 */
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;	/* sentinel when IDA allocation fails */

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
 114
/* Release the socket's tracing cookie back to the IDA, if one was set.
 * The cookie field is parked at the 0xffffffff sentinel afterwards.
 */
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
 124
 125static inline int hci_test_bit(int nr, const void *addr)
 126{
 127	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 128}
 129
/* Security filter: whitelist of packet types, events and command
 * opcodes that unprivileged (no CAP_NET_RAW) RAW sockets may use.
 */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed H:4 packet types (bitmask) */
	__u32 event_mask[2];	/* allowed HCI events (bitmask over event codes) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
 159
/* Global list of all open HCI sockets, guarded by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
 163
/* Apply the socket's RAW-channel filter to @skb.
 *
 * Returns true if the packet must be dropped for this socket, false if
 * it passes the filter.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* For command-complete/status events, also match the opcode
	 * embedded in the event payload against the filter's opcode.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
 200
 201/* Send frame to RAW socket */
 202void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 203{
 204	struct sock *sk;
 205	struct sk_buff *skb_copy = NULL;
 206
 207	BT_DBG("hdev %p len %d", hdev, skb->len);
 208
 209	read_lock(&hci_sk_list.lock);
 210
 211	sk_for_each(sk, &hci_sk_list.head) {
 212		struct sk_buff *nskb;
 213
 214		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 215			continue;
 216
 217		/* Don't send frame to the socket it came from */
 218		if (skb->sk == sk)
 219			continue;
 220
 221		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 222			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 224			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 225			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 226			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 227				continue;
 228			if (is_filtered_packet(sk, skb))
 229				continue;
 230		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 231			if (!bt_cb(skb)->incoming)
 232				continue;
 233			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 234			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 235			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 236			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 237				continue;
 238		} else {
 239			/* Don't send frame to other channel types */
 240			continue;
 241		}
 242
 243		if (!skb_copy) {
 244			/* Create a private copy with headroom */
 245			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 246			if (!skb_copy)
 247				continue;
 248
 249			/* Put type byte before the data */
 250			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 251		}
 252
 253		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 254		if (!nskb)
 255			continue;
 256
 257		if (sock_queue_rcv_skb(sk, nskb))
 258			kfree_skb(nskb);
 259	}
 260
 261	read_unlock(&hci_sk_list.lock);
 262
 263	kfree_skb(skb_copy);
 264}
 265
 266/* Send frame to sockets with specific channel */
 267static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 268				  int flag, struct sock *skip_sk)
 269{
 270	struct sock *sk;
 271
 272	BT_DBG("channel %u len %d", channel, skb->len);
 273
 
 
 274	sk_for_each(sk, &hci_sk_list.head) {
 275		struct sk_buff *nskb;
 276
 277		/* Ignore socket without the flag set */
 278		if (!hci_sock_test_flag(sk, flag))
 279			continue;
 280
 281		/* Skip the original socket */
 282		if (sk == skip_sk)
 283			continue;
 284
 285		if (sk->sk_state != BT_BOUND)
 286			continue;
 287
 288		if (hci_pi(sk)->channel != channel)
 289			continue;
 290
 291		nskb = skb_clone(skb, GFP_ATOMIC);
 292		if (!nskb)
 293			continue;
 294
 295		if (sock_queue_rcv_skb(sk, nskb))
 296			kfree_skb(nskb);
 297	}
 298
 299}
 300
/* Locked wrapper around __hci_send_to_channel(): takes the socket-list
 * read lock for the duration of the broadcast.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
 308
 309/* Send frame to monitor socket */
 310void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 311{
 312	struct sk_buff *skb_copy = NULL;
 313	struct hci_mon_hdr *hdr;
 314	__le16 opcode;
 315
 316	if (!atomic_read(&monitor_promisc))
 317		return;
 318
 319	BT_DBG("hdev %p len %d", hdev, skb->len);
 320
 321	switch (hci_skb_pkt_type(skb)) {
 322	case HCI_COMMAND_PKT:
 323		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 324		break;
 325	case HCI_EVENT_PKT:
 326		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 327		break;
 328	case HCI_ACLDATA_PKT:
 329		if (bt_cb(skb)->incoming)
 330			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 331		else
 332			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 333		break;
 334	case HCI_SCODATA_PKT:
 335		if (bt_cb(skb)->incoming)
 336			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 337		else
 338			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 339		break;
 340	case HCI_ISODATA_PKT:
 341		if (bt_cb(skb)->incoming)
 342			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
 343		else
 344			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
 345		break;
 346	case HCI_DIAG_PKT:
 347		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 348		break;
 349	default:
 350		return;
 351	}
 352
 353	/* Create a private copy with headroom */
 354	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 355	if (!skb_copy)
 356		return;
 357
 358	/* Put header before the data */
 359	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 360	hdr->opcode = opcode;
 361	hdr->index = cpu_to_le16(hdev->id);
 362	hdr->len = cpu_to_le16(skb->len);
 363
 364	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 365			    HCI_SOCK_TRUSTED, NULL);
 366	kfree_skb(skb_copy);
 367}
 368
/* For every control-channel socket carrying flag @flag (except
 * @skip_sk), emit an HCI_MON_CTRL_EVENT frame to the monitor channel,
 * tagged with that socket's cookie. @tstamp is stamped on each frame;
 * @hdev selects the index field (MGMT_INDEX_NONE when NULL).
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		/* Payload: 4-byte cookie, 2-byte event, then event data */
		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* Lock-free variant: hci_sk_list.lock is already held */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
 422
/* Build the monitor-channel frame announcing a device lifecycle @event
 * (HCI_DEV_REG/UNREG/SETUP/UP/OPEN/CLOSE) for @hdev.
 *
 * Returns the frame, or NULL on allocation failure or unsupported
 * event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
 500
/* Build an HCI_MON_CTRL_OPEN frame describing socket @sk for the
 * monitor channel.
 *
 * Payload layout: 4-byte cookie, 2-byte format, 3-byte version, 4-byte
 * flags, 1-byte comm length, then the task name. Returns NULL when the
 * socket has no cookie, the channel is unsupported, or allocation
 * fails.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
 558
/* Build an HCI_MON_CTRL_CLOSE frame for socket @sk; payload is the
 * 4-byte cookie only. Returns NULL when the socket has no cookie, the
 * channel is unsupported, or allocation fails.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
 596
/* Build an HCI_MON_CTRL_COMMAND frame mirroring a management command
 * sent on socket @sk: 4-byte cookie, 2-byte opcode, then @len bytes of
 * command parameters from @buf. Returns NULL on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
 623
/* Queue a printf-formatted HCI_MON_SYSTEM_NOTE (NUL-terminated text)
 * directly on monitor socket @sk. Best-effort: silently dropped on
 * allocation failure.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass computes the formatted length */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass formats into the skb, followed by the terminator */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
 655
/* Replay the current state of all registered controllers to a newly
 * opened monitor socket: a NEW_INDEX event per device, plus OPEN and
 * UP/SETUP events matching its present state.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
 697
/* Replay a CTRL_OPEN event for every existing HCI socket (that has a
 * cookie) to a newly opened monitor socket @mon_sk.
 */
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
 717
 718/* Generate internal stack event */
 719static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 720{
 721	struct hci_event_hdr *hdr;
 722	struct hci_ev_stack_internal *ev;
 723	struct sk_buff *skb;
 724
 725	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 726	if (!skb)
 727		return;
 728
 729	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 730	hdr->evt  = HCI_EV_STACK_INTERNAL;
 731	hdr->plen = sizeof(*ev) + dlen;
 732
 733	ev = skb_put(skb, sizeof(*ev) + dlen);
 734	ev->type = type;
 735	memcpy(ev->data, data, dlen);
 736
 737	bt_cb(skb)->incoming = 1;
 738	__net_timestamp(skb);
 739
 740	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 741	hci_send_to_sock(hdev, skb);
 742	kfree_skb(skb);
 743}
 744
/* Notify interested parties of a device lifecycle @event for @hdev:
 * mirrors it to promiscuous monitor sockets, raises a stack-internal
 * event for UP/DOWN, and on unregistration marks bound sockets with
 * EPIPE and wakes them.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
 784
 785static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 786{
 787	struct hci_mgmt_chan *c;
 788
 789	list_for_each_entry(c, &mgmt_chan_list, list) {
 790		if (c->channel == channel)
 791			return c;
 792	}
 793
 794	return NULL;
 795}
 796
 797static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 798{
 799	struct hci_mgmt_chan *c;
 800
 801	mutex_lock(&mgmt_chan_list_lock);
 802	c = __hci_mgmt_chan_find(channel);
 803	mutex_unlock(&mgmt_chan_list_lock);
 804
 805	return c;
 806}
 807
 808int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 809{
 810	if (c->channel < HCI_CHANNEL_CONTROL)
 811		return -EINVAL;
 812
 813	mutex_lock(&mgmt_chan_list_lock);
 814	if (__hci_mgmt_chan_find(c->channel)) {
 815		mutex_unlock(&mgmt_chan_list_lock);
 816		return -EALREADY;
 817	}
 818
 819	list_add_tail(&c->list, &mgmt_chan_list);
 820
 821	mutex_unlock(&mgmt_chan_list_lock);
 822
 823	return 0;
 824}
 825EXPORT_SYMBOL(hci_mgmt_chan_register);
 826
 827void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 828{
 829	mutex_lock(&mgmt_chan_list_lock);
 830	list_del(&c->list);
 831	mutex_unlock(&mgmt_chan_list_lock);
 832}
 833EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 834
/* Release an HCI socket: tear down channel specific state, detach from
 * the bound controller (if any) and purge queued packets.
 *
 * Monitor sockets drop the global promiscuous counter. Raw, user and
 * control sockets emit a close notification to the monitor channel and
 * release their socket cookie. A user channel socket additionally
 * powers the controller back down and re-announces the index over the
 * management interface, since it held exclusive device access.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Drop the promiscuous reference taken at bind time */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}
 899
 900static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
 901{
 902	bdaddr_t bdaddr;
 903	int err;
 904
 905	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 906		return -EFAULT;
 907
 908	hci_dev_lock(hdev);
 909
 910	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 911
 912	hci_dev_unlock(hdev);
 913
 914	return err;
 915}
 916
 917static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 918{
 919	bdaddr_t bdaddr;
 920	int err;
 921
 922	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 923		return -EFAULT;
 924
 925	hci_dev_lock(hdev);
 926
 927	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 928
 929	hci_dev_unlock(hdev);
 930
 931	return err;
 932}
 933
 934/* Ioctls that require bound socket */
 935static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 936				unsigned long arg)
 937{
 938	struct hci_dev *hdev = hci_hdev_from_sock(sk);
 939
 940	if (IS_ERR(hdev))
 941		return PTR_ERR(hdev);
 942
 943	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 944		return -EBUSY;
 945
 946	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 947		return -EOPNOTSUPP;
 948
 949	if (hdev->dev_type != HCI_PRIMARY)
 950		return -EOPNOTSUPP;
 951
 952	switch (cmd) {
 953	case HCISETRAW:
 954		if (!capable(CAP_NET_ADMIN))
 955			return -EPERM;
 956		return -EOPNOTSUPP;
 957
 958	case HCIGETCONNINFO:
 959		return hci_get_conn_info(hdev, (void __user *)arg);
 960
 961	case HCIGETAUTHINFO:
 962		return hci_get_auth_info(hdev, (void __user *)arg);
 963
 964	case HCIBLOCKADDR:
 965		if (!capable(CAP_NET_ADMIN))
 966			return -EPERM;
 967		return hci_sock_reject_list_add(hdev, (void __user *)arg);
 968
 969	case HCIUNBLOCKADDR:
 970		if (!capable(CAP_NET_ADMIN))
 971			return -EPERM;
 972		return hci_sock_reject_list_del(hdev, (void __user *)arg);
 973	}
 974
 975	return -ENOIOCTLCMD;
 976}
 977
/* ioctl handler for raw channel HCI sockets.
 *
 * Generates the socket cookie (and the matching monitor open
 * notification) on first use, handles the device independent commands
 * directly with the socket lock dropped, and forwards everything else
 * to hci_sock_bound_ioctl() with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* ioctls are only valid on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* The commands below operate on the device table rather than on
	 * this socket, so run them without the socket lock held.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else requires a socket bound to a device */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1071
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl entry point: commands whose argument is a plain
 * device index pass through unchanged, pointer arguments are converted
 * via compat_ptr().
 */
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		/* argument is a device index, not a pointer */
		return hci_sock_ioctl(sock, cmd, arg);
	default:
		return hci_sock_ioctl(sock, cmd,
				      (unsigned long)compat_ptr(arg));
	}
}
#endif
1087
/* Bind an HCI socket to a channel (and for raw/user channels to a
 * specific controller).
 *
 * Channel specific behavior:
 *  - RAW: optional device binding, promiscuous reference on the device,
 *    monitor open notification, trusted when CAP_NET_ADMIN.
 *  - USER: exclusive device access (CAP_NET_ADMIN required); the index
 *    is removed from mgmt, the device is powered up and the socket is
 *    implicitly trusted.
 *  - MONITOR: CAP_NET_RAW required; the current device and socket
 *    state is replayed so the monitor starts with a complete view.
 *  - LOGGING: CAP_NET_ADMIN required, write-only logging injection.
 *  - default: a registered management channel (e.g. control), with
 *    per-socket event flags initialized for the control channel.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); a short sockaddr leaves the rest
	 * zeroed by the memset above it.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel always requires an explicit device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the device is being set
		 * up, configured or already in active use (unless it is
		 * only up due to the auto-off grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state so the monitor starts complete */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1383
1384static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1385			    int peer)
1386{
1387	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1388	struct sock *sk = sock->sk;
1389	struct hci_dev *hdev;
1390	int err = 0;
1391
1392	BT_DBG("sock %p sk %p", sock, sk);
1393
1394	if (peer)
1395		return -EOPNOTSUPP;
1396
1397	lock_sock(sk);
1398
1399	hdev = hci_hdev_from_sock(sk);
1400	if (IS_ERR(hdev)) {
1401		err = PTR_ERR(hdev);
1402		goto done;
1403	}
1404
 
1405	haddr->hci_family = AF_BLUETOOTH;
1406	haddr->hci_dev    = hdev->id;
1407	haddr->hci_channel= hci_pi(sk)->channel;
1408	err = sizeof(*haddr);
1409
1410done:
1411	release_sock(sk);
1412	return err;
1413}
1414
/* Attach the ancillary data (packet direction and/or timestamp) that
 * the raw socket requested via HCI_DATA_DIR / HCI_TIME_STAMP socket
 * options to an outgoing recvmsg().
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks using the old time ABI expect a 32-bit
		 * timeval in the cmsg payload.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1451
/* Receive one queued packet from an HCI socket.
 *
 * Logging channel sockets are write-only, so reads are rejected.
 * Depending on the channel, ancillary data is attached: raw sockets
 * get the direction/timestamp cmsgs they enabled, user/monitor/mgmt
 * sockets get a plain receive timestamp. A too-small buffer truncates
 * the packet and sets MSG_TRUNC; if the caller passed MSG_TRUNC in
 * flags, the full packet length is returned instead of the copied one.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	/* Bound device went away: report EOF */
	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1507
/* Process one management command received on a mgmt channel socket.
 *
 * Validates the mgmt_hdr framing, mirrors control channel commands to
 * the monitor, resolves the handler from the channel's handler table,
 * applies trust/index/length checks and finally invokes the handler.
 *
 * Returns msglen on success or a negative errno for transport level
 * failures; command level failures are reported back to the socket via
 * mgmt_cmd_status() and still return the (non-negative) status result.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue explicitly whitelisted ops */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or under exclusive user channel
		 * access are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless optional, the presence of an index must match what the
	 * handler expects.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Fixed-size commands must match exactly, variable-length ones
	 * must at least contain the fixed prefix.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1633
/* Handle a frame written on a logging channel socket and inject it
 * into the monitor channel as an HCI_MON_USER_LOGGING entry.
 *
 * Expected layout: struct hci_mon_hdr (opcode must be 0x0000),
 * followed by a priority byte (0-7), an ident length byte, the NUL
 * terminated ident string and a NUL terminated message. Returns len
 * on success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* Header length field must match the actual payload size */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* An explicit index must refer to an existing controller; the
	 * reference only pins the device for the duration of the send.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	/* hci_send_to_channel() does not consume the skb (all other call
	 * sites in this file free it afterwards as well), so falling
	 * through into the drop label below is the success path too.
	 */
	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1717
/* Transmit path for HCI sockets.
 *
 * Monitor sockets are read-only. Logging frames and management
 * commands are dispatched to their specific handlers. Raw and user
 * channel packets are forwarded to the bound controller; the first
 * byte of the payload is the HCI packet type indicator. On success the
 * number of bytes consumed (len) is returned.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum: packet type byte plus the smallest HCI header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Hold the registration lock so the channel cannot be
		 * unregistered while its command handler runs.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First payload byte is the packet type indicator */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter whitelist need
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* OGF 0x3f is vendor specific: bypass the command
			 * queue and send it out directly.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets always require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1859
1860static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1861			       sockptr_t optval, unsigned int len)
1862{
1863	struct hci_ufilter uf = { .opcode = 0 };
1864	struct sock *sk = sock->sk;
1865	int err = 0, opt = 0;
1866
1867	BT_DBG("sk %p, opt %d", sk, optname);
1868
1869	if (level != SOL_HCI)
1870		return -ENOPROTOOPT;
1871
1872	lock_sock(sk);
1873
1874	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1875		err = -EBADFD;
1876		goto done;
1877	}
1878
1879	switch (optname) {
1880	case HCI_DATA_DIR:
1881		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1882			err = -EFAULT;
1883			break;
1884		}
1885
1886		if (opt)
1887			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1888		else
1889			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1890		break;
1891
1892	case HCI_TIME_STAMP:
1893		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1894			err = -EFAULT;
1895			break;
1896		}
1897
1898		if (opt)
1899			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1900		else
1901			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1902		break;
1903
1904	case HCI_FILTER:
1905		{
1906			struct hci_filter *f = &hci_pi(sk)->filter;
1907
1908			uf.type_mask = f->type_mask;
1909			uf.opcode    = f->opcode;
1910			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1911			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1912		}
1913
1914		len = min_t(unsigned int, len, sizeof(uf));
1915		if (copy_from_sockptr(&uf, optval, len)) {
1916			err = -EFAULT;
1917			break;
1918		}
1919
1920		if (!capable(CAP_NET_RAW)) {
1921			uf.type_mask &= hci_sec_filter.type_mask;
1922			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1923			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1924		}
1925
1926		{
1927			struct hci_filter *f = &hci_pi(sk)->filter;
1928
1929			f->type_mask = uf.type_mask;
1930			f->opcode    = uf.opcode;
1931			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1932			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1933		}
1934		break;
1935
1936	default:
1937		err = -ENOPROTOOPT;
1938		break;
1939	}
1940
1941done:
1942	release_sock(sk);
1943	return err;
1944}
1945
1946static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1947			       char __user *optval, int __user *optlen)
1948{
1949	struct hci_ufilter uf;
1950	struct sock *sk = sock->sk;
1951	int len, opt, err = 0;
1952
1953	BT_DBG("sk %p, opt %d", sk, optname);
1954
1955	if (level != SOL_HCI)
1956		return -ENOPROTOOPT;
1957
1958	if (get_user(len, optlen))
1959		return -EFAULT;
1960
1961	lock_sock(sk);
1962
1963	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1964		err = -EBADFD;
1965		goto done;
1966	}
1967
1968	switch (optname) {
1969	case HCI_DATA_DIR:
1970		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1971			opt = 1;
1972		else
1973			opt = 0;
1974
1975		if (put_user(opt, optval))
1976			err = -EFAULT;
1977		break;
1978
1979	case HCI_TIME_STAMP:
1980		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1981			opt = 1;
1982		else
1983			opt = 0;
1984
1985		if (put_user(opt, optval))
1986			err = -EFAULT;
1987		break;
1988
1989	case HCI_FILTER:
1990		{
1991			struct hci_filter *f = &hci_pi(sk)->filter;
1992
1993			memset(&uf, 0, sizeof(uf));
1994			uf.type_mask = f->type_mask;
1995			uf.opcode    = f->opcode;
1996			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1997			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1998		}
1999
2000		len = min_t(unsigned int, len, sizeof(uf));
2001		if (copy_to_user(optval, &uf, len))
2002			err = -EFAULT;
2003		break;
2004
2005	default:
2006		err = -ENOPROTOOPT;
2007		break;
2008	}
2009
2010done:
2011	release_sock(sk);
2012	return err;
2013}
2014
2015static const struct proto_ops hci_sock_ops = {
2016	.family		= PF_BLUETOOTH,
2017	.owner		= THIS_MODULE,
2018	.release	= hci_sock_release,
2019	.bind		= hci_sock_bind,
2020	.getname	= hci_sock_getname,
2021	.sendmsg	= hci_sock_sendmsg,
2022	.recvmsg	= hci_sock_recvmsg,
2023	.ioctl		= hci_sock_ioctl,
2024#ifdef CONFIG_COMPAT
2025	.compat_ioctl	= hci_sock_compat_ioctl,
2026#endif
2027	.poll		= datagram_poll,
2028	.listen		= sock_no_listen,
2029	.shutdown	= sock_no_shutdown,
2030	.setsockopt	= hci_sock_setsockopt,
2031	.getsockopt	= hci_sock_getsockopt,
2032	.connect	= sock_no_connect,
2033	.socketpair	= sock_no_socketpair,
2034	.accept		= sock_no_accept,
2035	.mmap		= sock_no_mmap
2036};
2037
2038static struct proto hci_sk_proto = {
2039	.name		= "HCI",
2040	.owner		= THIS_MODULE,
2041	.obj_size	= sizeof(struct hci_pinfo)
2042};
2043
/* PF_BLUETOOTH/BTPROTO_HCI socket creation callback.
 *
 * Only SOCK_RAW is supported. Allocates the sock (sized for
 * struct hci_pinfo via hci_sk_proto.obj_size), initializes it into the
 * BT_OPEN state and links it into the global hci_sk_list.
 *
 * Returns 0 on success, -ESOCKTNOSUPPORT for a non-raw socket type or
 * -ENOMEM when allocation fails.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	/* Clear SOCK_ZAPPED so the socket is usable immediately */
	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
2072
/* Family descriptor registered for BTPROTO_HCI in hci_sock_init() */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
2078
/* Register the HCI socket layer: proto, BTPROTO_HCI family and the
 * /proc entry. On any failure, everything registered so far is torn
 * down again before returning the error.
 */
int __init hci_sock_init(void)
{
	int err;

	/* struct sockaddr_hci must fit in a generic struct sockaddr,
	 * since userspace passes it through the generic bind()/getname()
	 * paths.
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the family registration before the shared unwind */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2110
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): proc entry, socket family, then the proto itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}