   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <asm/unaligned.h>
  30
  31#include <net/bluetooth/bluetooth.h>
  32#include <net/bluetooth/hci_core.h>
  33#include <net/bluetooth/hci_mon.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "mgmt_util.h"
  37
  38static LIST_HEAD(mgmt_chan_list);
  39static DEFINE_MUTEX(mgmt_chan_list_lock);
  40
  41static atomic_t monitor_promisc = ATOMIC_INIT(0);
  42
  43/* ----- HCI socket interface ----- */
  44
  45/* Socket info */
  46#define hci_pi(sk) ((struct hci_pinfo *) sk)
  47
  48struct hci_pinfo {
  49	struct bt_sock    bt;
  50	struct hci_dev    *hdev;
  51	struct hci_filter filter;
  52	__u32             cmsg_mask;
  53	unsigned short    channel;
  54	unsigned long     flags;
  55};
  56
  57void hci_sock_set_flag(struct sock *sk, int nr)
  58{
  59	set_bit(nr, &hci_pi(sk)->flags);
  60}
  61
  62void hci_sock_clear_flag(struct sock *sk, int nr)
  63{
  64	clear_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67int hci_sock_test_flag(struct sock *sk, int nr)
  68{
  69	return test_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72unsigned short hci_sock_get_channel(struct sock *sk)
  73{
  74	return hci_pi(sk)->channel;
  75}
  76
  77static inline int hci_test_bit(int nr, const void *addr)
  78{
  79	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
  80}
  81
  82/* Security filter */
  83#define HCI_SFLT_MAX_OGF  5
  84
  85struct hci_sec_filter {
  86	__u32 type_mask;
  87	__u32 event_mask[2];
  88	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
  89};
  90
  91static const struct hci_sec_filter hci_sec_filter = {
  92	/* Packet types */
  93	0x10,
  94	/* Events */
  95	{ 0x1000d9fe, 0x0000b00c },
  96	/* Commands */
  97	{
  98		{ 0x0 },
  99		/* OGF_LINK_CTL */
 100		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 101		/* OGF_LINK_POLICY */
 102		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 103		/* OGF_HOST_CTL */
 104		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 105		/* OGF_INFO_PARAM */
 106		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 107		/* OGF_STATUS_PARAM */
 108		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 109	}
 110};
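
The OCF rows above are indexed by OGF and read by hci_test_bit(): nr >> 5 selects a 32-bit word, nr & 31 the bit within it. A standalone sketch of the same lookup, with the OGF_INFO_PARAM row copied from the table above (opcode 0x1001, Read Local Version Information, splits into OGF 0x04 and OCF 0x0001, whose bit is set in that row, so by this table the command passes without CAP_NET_RAW):

/* Standalone re-implementation of the security-filter lookup done in
 * hci_sock_sendmsg(); mask values copied from hci_sec_filter above.
 */
#include <stdint.h>
#include <stdio.h>

static int test_bit32(int nr, const uint32_t *addr)
{
	return addr[nr >> 5] & ((uint32_t)1 << (nr & 31));	/* word, then bit */
}

int main(void)
{
	/* OGF_INFO_PARAM row from hci_sec_filter */
	static const uint32_t info_param[4] = { 0x000002be, 0, 0, 0 };
	uint16_t opcode = 0x1001;	/* Read Local Version Information */
	uint16_t ogf = opcode >> 10;	/* 0x04 */
	uint16_t ocf = opcode & 0x03ff;	/* 0x0001 */

	printf("OGF 0x%02x OCF 0x%04x unprivileged: %s\n", ogf, ocf,
	       test_bit32(ocf, info_param) ? "yes" : "no");
	return 0;
}
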
 111
 112static struct bt_sock_list hci_sk_list = {
 113	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 114};
 115
 116static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 117{
 118	struct hci_filter *flt;
 119	int flt_type, flt_event;
 120
 121	/* Apply filter */
 122	flt = &hci_pi(sk)->filter;
 123
 124	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 125
 126	if (!test_bit(flt_type, &flt->type_mask))
 127		return true;
 128
 129	/* Extra filter for event packets only */
 130	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 131		return false;
 132
 133	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 134
 135	if (!hci_test_bit(flt_event, &flt->event_mask))
 136		return true;
 137
 138	/* Check filter only when opcode is set */
 139	if (!flt->opcode)
 140		return false;
 141
 142	if (flt_event == HCI_EV_CMD_COMPLETE &&
 143	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 144		return true;
 145
 146	if (flt_event == HCI_EV_CMD_STATUS &&
 147	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 148		return true;
 149
 150	return false;
 151}
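
is_filtered_packet() consumes the per-socket filter that userspace installs with setsockopt(SOL_HCI, HCI_FILTER); see hci_sock_setsockopt() further down. A minimal sketch of the installing side, assuming the BlueZ userspace helpers from <bluetooth/hci_lib.h> (function name install_event_filter is illustrative; link with -lbluetooth):

/* Ask a bound raw HCI socket to deliver only HCI events, and of
 * those only Command Complete.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int install_event_filter(int sk)
{
	struct hci_filter flt;

	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);	/* type_mask bit */
	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);	/* event_mask bit */

	if (setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0) {
		perror("HCI_FILTER");
		return -1;
	}
	return 0;
}
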
 152
 153/* Send frame to RAW socket */
 154void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 155{
 156	struct sock *sk;
 157	struct sk_buff *skb_copy = NULL;
 158
 159	BT_DBG("hdev %p len %d", hdev, skb->len);
 160
 161	read_lock(&hci_sk_list.lock);
 162
 163	sk_for_each(sk, &hci_sk_list.head) {
 164		struct sk_buff *nskb;
 165
 166		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 167			continue;
 168
 169		/* Don't send frame to the socket it came from */
 170		if (skb->sk == sk)
 171			continue;
 172
 173		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 174			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 175			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 176			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 177			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 178				continue;
 179			if (is_filtered_packet(sk, skb))
 180				continue;
 181		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 182			if (!bt_cb(skb)->incoming)
 183				continue;
 184			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 185			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 186			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 187				continue;
 188		} else {
 189			/* Don't send frame to other channel types */
 190			continue;
 191		}
 192
 193		if (!skb_copy) {
 194			/* Create a private copy with headroom */
 195			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 196			if (!skb_copy)
 197				continue;
 198
 199			/* Put type byte before the data */
 200			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 201		}
 202
 203		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 204		if (!nskb)
 205			continue;
 206
 207		if (sock_queue_rcv_skb(sk, nskb))
 208			kfree_skb(nskb);
 209	}
 210
 211	read_unlock(&hci_sk_list.lock);
 212
 213	kfree_skb(skb_copy);
 214}
 215
 216/* Send frame to sockets with specific channel */
 217void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 218			 int flag, struct sock *skip_sk)
 219{
 220	struct sock *sk;
 221
 222	BT_DBG("channel %u len %d", channel, skb->len);
 223
 224	read_lock(&hci_sk_list.lock);
 225
 226	sk_for_each(sk, &hci_sk_list.head) {
 227		struct sk_buff *nskb;
 228
 229		/* Ignore socket without the flag set */
 230		if (!hci_sock_test_flag(sk, flag))
 231			continue;
 232
 233		/* Skip the original socket */
 234		if (sk == skip_sk)
 235			continue;
 236
 237		if (sk->sk_state != BT_BOUND)
 238			continue;
 239
 240		if (hci_pi(sk)->channel != channel)
 241			continue;
 242
 243		nskb = skb_clone(skb, GFP_ATOMIC);
 244		if (!nskb)
 245			continue;
 246
 247		if (sock_queue_rcv_skb(sk, nskb))
 248			kfree_skb(nskb);
 249	}
 250
 251	read_unlock(&hci_sk_list.lock);
 252}
 253
 254/* Send frame to monitor socket */
 255void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 256{
 257	struct sk_buff *skb_copy = NULL;
 258	struct hci_mon_hdr *hdr;
 259	__le16 opcode;
 260
 261	if (!atomic_read(&monitor_promisc))
 262		return;
 263
 264	BT_DBG("hdev %p len %d", hdev, skb->len);
 265
 266	switch (hci_skb_pkt_type(skb)) {
 267	case HCI_COMMAND_PKT:
 268		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 269		break;
 270	case HCI_EVENT_PKT:
 271		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 272		break;
 273	case HCI_ACLDATA_PKT:
 274		if (bt_cb(skb)->incoming)
 275			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 276		else
 277			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 278		break;
 279	case HCI_SCODATA_PKT:
 280		if (bt_cb(skb)->incoming)
 281			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 282		else
 283			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 284		break;
 285	case HCI_DIAG_PKT:
 286		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 287		break;
 288	default:
 289		return;
 290	}
 291
 292	/* Create a private copy with headroom */
 293	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 294	if (!skb_copy)
 295		return;
 296
 297	/* Put header before the data */
 298	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 299	hdr->opcode = opcode;
 300	hdr->index = cpu_to_le16(hdev->id);
 301	hdr->len = cpu_to_le16(skb->len);
 302
 303	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 304			    HCI_SOCK_TRUSTED, NULL);
 305	kfree_skb(skb_copy);
 306}
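
Every frame delivered on the monitor channel carries the 6-byte hci_mon_hdr built above: opcode, controller index and payload length, all little endian. A sketch of the reading side on an already-bound monitor socket (read_monitor_frame is an illustrative name):

/* Decode the hci_mon_hdr that hci_send_to_monitor() prepends. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int read_monitor_frame(int sk)
{
	uint8_t buf[2048];
	uint16_t opcode, index, len;
	ssize_t n;

	n = read(sk, buf, sizeof(buf));
	if (n < 6)
		return -1;

	opcode = buf[0] | (buf[1] << 8);	/* little endian */
	index  = buf[2] | (buf[3] << 8);
	len    = buf[4] | (buf[5] << 8);

	printf("opcode 0x%04x index %u payload %u bytes\n", opcode, index, len);
	return 0;
}
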
 307
 308static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 309{
 310	struct hci_mon_hdr *hdr;
 311	struct hci_mon_new_index *ni;
 312	struct hci_mon_index_info *ii;
 313	struct sk_buff *skb;
 314	__le16 opcode;
 315
 316	switch (event) {
 317	case HCI_DEV_REG:
 318		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 319		if (!skb)
 320			return NULL;
 321
 322		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 323		ni->type = hdev->dev_type;
 324		ni->bus = hdev->bus;
 325		bacpy(&ni->bdaddr, &hdev->bdaddr);
 326		memcpy(ni->name, hdev->name, 8);
 327
 328		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 329		break;
 330
 331	case HCI_DEV_UNREG:
 332		skb = bt_skb_alloc(0, GFP_ATOMIC);
 333		if (!skb)
 334			return NULL;
 335
 336		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 337		break;
 338
 339	case HCI_DEV_SETUP:
 340		if (hdev->manufacturer == 0xffff)
 341			return NULL;
 342
 343		/* fall through */
 344
 345	case HCI_DEV_UP:
 346		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 347		if (!skb)
 348			return NULL;
 349
 350		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 351		bacpy(&ii->bdaddr, &hdev->bdaddr);
 352		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 353
 354		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 355		break;
 356
 357	case HCI_DEV_OPEN:
 358		skb = bt_skb_alloc(0, GFP_ATOMIC);
 359		if (!skb)
 360			return NULL;
 361
 362		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 363		break;
 364
 365	case HCI_DEV_CLOSE:
 366		skb = bt_skb_alloc(0, GFP_ATOMIC);
 367		if (!skb)
 368			return NULL;
 369
 370		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 371		break;
 372
 373	default:
 374		return NULL;
 375	}
 376
 377	__net_timestamp(skb);
 378
 379	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 380	hdr->opcode = opcode;
 381	hdr->index = cpu_to_le16(hdev->id);
 382	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 383
 384	return skb;
 385}
 386
 387static void __printf(2, 3)
 388send_monitor_note(struct sock *sk, const char *fmt, ...)
 389{
 390	size_t len;
 391	struct hci_mon_hdr *hdr;
 392	struct sk_buff *skb;
 393	va_list args;
 394
 395	va_start(args, fmt);
 396	len = vsnprintf(NULL, 0, fmt, args);
 397	va_end(args);
 398
 399	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 400	if (!skb)
 401		return;
 402
 403	va_start(args, fmt);
 404	vsprintf(skb_put(skb, len), fmt, args);
 405	*skb_put(skb, 1) = 0;
 406	va_end(args);
 407
 408	__net_timestamp(skb);
 409
 410	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 411	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 412	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 413	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 414
 415	if (sock_queue_rcv_skb(sk, skb))
 416		kfree_skb(skb);
 417}
 418
 419static void send_monitor_replay(struct sock *sk)
 420{
 421	struct hci_dev *hdev;
 422
 423	read_lock(&hci_dev_list_lock);
 424
 425	list_for_each_entry(hdev, &hci_dev_list, list) {
 426		struct sk_buff *skb;
 427
 428		skb = create_monitor_event(hdev, HCI_DEV_REG);
 429		if (!skb)
 430			continue;
 431
 432		if (sock_queue_rcv_skb(sk, skb))
 433			kfree_skb(skb);
 434
 435		if (!test_bit(HCI_RUNNING, &hdev->flags))
 436			continue;
 437
 438		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 439		if (!skb)
 440			continue;
 441
 442		if (sock_queue_rcv_skb(sk, skb))
 443			kfree_skb(skb);
 444
 445		if (test_bit(HCI_UP, &hdev->flags))
 446			skb = create_monitor_event(hdev, HCI_DEV_UP);
 447		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 448			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 449		else
 450			skb = NULL;
 451
 452		if (skb) {
 453			if (sock_queue_rcv_skb(sk, skb))
 454				kfree_skb(skb);
 455		}
 456	}
 457
 458	read_unlock(&hci_dev_list_lock);
 459}
 460
 461/* Generate internal stack event */
 462static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 463{
 464	struct hci_event_hdr *hdr;
 465	struct hci_ev_stack_internal *ev;
 466	struct sk_buff *skb;
 467
 468	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 469	if (!skb)
 470		return;
 471
 472	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 473	hdr->evt  = HCI_EV_STACK_INTERNAL;
 474	hdr->plen = sizeof(*ev) + dlen;
 475
 476	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 477	ev->type = type;
 478	memcpy(ev->data, data, dlen);
 479
 480	bt_cb(skb)->incoming = 1;
 481	__net_timestamp(skb);
 482
 483	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 484	hci_send_to_sock(hdev, skb);
 485	kfree_skb(skb);
 486}
 487
 488void hci_sock_dev_event(struct hci_dev *hdev, int event)
 489{
 490	BT_DBG("hdev %s event %d", hdev->name, event);
 491
 492	if (atomic_read(&monitor_promisc)) {
 493		struct sk_buff *skb;
 494
 495		/* Send event to monitor */
 496		skb = create_monitor_event(hdev, event);
 497		if (skb) {
 498			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 499					    HCI_SOCK_TRUSTED, NULL);
 500			kfree_skb(skb);
 501		}
 502	}
 503
 504	if (event <= HCI_DEV_DOWN) {
 505		struct hci_ev_si_device ev;
 506
 507		/* Send event to sockets */
 508		ev.event  = event;
 509		ev.dev_id = hdev->id;
 510		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 511	}
 512
 513	if (event == HCI_DEV_UNREG) {
 514		struct sock *sk;
 515
 516		/* Detach sockets from device */
 517		read_lock(&hci_sk_list.lock);
 518		sk_for_each(sk, &hci_sk_list.head) {
 519			bh_lock_sock_nested(sk);
 520			if (hci_pi(sk)->hdev == hdev) {
 521				hci_pi(sk)->hdev = NULL;
 522				sk->sk_err = EPIPE;
 523				sk->sk_state = BT_OPEN;
 524				sk->sk_state_change(sk);
 525
 526				hci_dev_put(hdev);
 527			}
 528			bh_unlock_sock(sk);
 529		}
 530		read_unlock(&hci_sk_list.lock);
 531	}
 532}
 533
 534static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 535{
 536	struct hci_mgmt_chan *c;
 537
 538	list_for_each_entry(c, &mgmt_chan_list, list) {
 539		if (c->channel == channel)
 540			return c;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 547{
 548	struct hci_mgmt_chan *c;
 549
 550	mutex_lock(&mgmt_chan_list_lock);
 551	c = __hci_mgmt_chan_find(channel);
 552	mutex_unlock(&mgmt_chan_list_lock);
 553
 554	return c;
 555}
 556
 557int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 558{
 559	if (c->channel < HCI_CHANNEL_CONTROL)
 560		return -EINVAL;
 561
 562	mutex_lock(&mgmt_chan_list_lock);
 563	if (__hci_mgmt_chan_find(c->channel)) {
 564		mutex_unlock(&mgmt_chan_list_lock);
 565		return -EALREADY;
 566	}
 567
 568	list_add_tail(&c->list, &mgmt_chan_list);
 569
 570	mutex_unlock(&mgmt_chan_list_lock);
 571
 572	return 0;
 573}
 574EXPORT_SYMBOL(hci_mgmt_chan_register);
 575
 576void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 577{
 578	mutex_lock(&mgmt_chan_list_lock);
 579	list_del(&c->list);
 580	mutex_unlock(&mgmt_chan_list_lock);
 581}
 582EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 583
 584static int hci_sock_release(struct socket *sock)
 585{
 586	struct sock *sk = sock->sk;
 587	struct hci_dev *hdev;
 588
 589	BT_DBG("sock %p sk %p", sock, sk);
 590
 591	if (!sk)
 592		return 0;
 593
 594	hdev = hci_pi(sk)->hdev;
 595
 596	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
 597		atomic_dec(&monitor_promisc);
 598
 599	bt_sock_unlink(&hci_sk_list, sk);
 600
 601	if (hdev) {
 602		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 603			/* When releasing a user channel exclusive access,
 604			 * call hci_dev_do_close directly instead of calling
 605			 * hci_dev_close to ensure the exclusive access will
 606			 * be released and the controller brought back down.
 607			 *
 608			 * The checking of HCI_AUTO_OFF is not needed in this
 609			 * case since it will have been cleared already when
 610			 * opening the user channel.
 611			 */
 612			hci_dev_do_close(hdev);
 613			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 614			mgmt_index_added(hdev);
 615		}
 616
 617		atomic_dec(&hdev->promisc);
 618		hci_dev_put(hdev);
 619	}
 620
 621	sock_orphan(sk);
 622
 623	skb_queue_purge(&sk->sk_receive_queue);
 624	skb_queue_purge(&sk->sk_write_queue);
 625
 626	sock_put(sk);
 627	return 0;
 628}
 629
 630static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 631{
 632	bdaddr_t bdaddr;
 633	int err;
 634
 635	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 636		return -EFAULT;
 637
 638	hci_dev_lock(hdev);
 639
 640	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 641
 642	hci_dev_unlock(hdev);
 643
 644	return err;
 645}
 646
 647static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 648{
 649	bdaddr_t bdaddr;
 650	int err;
 651
 652	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 653		return -EFAULT;
 654
 655	hci_dev_lock(hdev);
 656
 657	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 658
 659	hci_dev_unlock(hdev);
 660
 661	return err;
 662}
 663
 664/* Ioctls that require bound socket */
 665static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 666				unsigned long arg)
 667{
 668	struct hci_dev *hdev = hci_pi(sk)->hdev;
 669
 670	if (!hdev)
 671		return -EBADFD;
 672
 673	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 674		return -EBUSY;
 675
 676	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 677		return -EOPNOTSUPP;
 678
 679	if (hdev->dev_type != HCI_BREDR)
 680		return -EOPNOTSUPP;
 681
 682	switch (cmd) {
 683	case HCISETRAW:
 684		if (!capable(CAP_NET_ADMIN))
 685			return -EPERM;
 686		return -EOPNOTSUPP;
 687
 688	case HCIGETCONNINFO:
 689		return hci_get_conn_info(hdev, (void __user *)arg);
 690
 691	case HCIGETAUTHINFO:
 692		return hci_get_auth_info(hdev, (void __user *)arg);
 693
 694	case HCIBLOCKADDR:
 695		if (!capable(CAP_NET_ADMIN))
 696			return -EPERM;
 697		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 698
 699	case HCIUNBLOCKADDR:
 700		if (!capable(CAP_NET_ADMIN))
 701			return -EPERM;
 702		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 703	}
 704
 705	return -ENOIOCTLCMD;
 706}
 707
 708static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 709			  unsigned long arg)
 710{
 711	void __user *argp = (void __user *)arg;
 712	struct sock *sk = sock->sk;
 713	int err;
 714
 715	BT_DBG("cmd %x arg %lx", cmd, arg);
 716
 717	lock_sock(sk);
 718
 719	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 720		err = -EBADFD;
 721		goto done;
 722	}
 723
 724	release_sock(sk);
 725
 726	switch (cmd) {
 727	case HCIGETDEVLIST:
 728		return hci_get_dev_list(argp);
 729
 730	case HCIGETDEVINFO:
 731		return hci_get_dev_info(argp);
 732
 733	case HCIGETCONNLIST:
 734		return hci_get_conn_list(argp);
 735
 736	case HCIDEVUP:
 737		if (!capable(CAP_NET_ADMIN))
 738			return -EPERM;
 739		return hci_dev_open(arg);
 740
 741	case HCIDEVDOWN:
 742		if (!capable(CAP_NET_ADMIN))
 743			return -EPERM;
 744		return hci_dev_close(arg);
 745
 746	case HCIDEVRESET:
 747		if (!capable(CAP_NET_ADMIN))
 748			return -EPERM;
 749		return hci_dev_reset(arg);
 750
 751	case HCIDEVRESTAT:
 752		if (!capable(CAP_NET_ADMIN))
 753			return -EPERM;
 754		return hci_dev_reset_stat(arg);
 755
 756	case HCISETSCAN:
 757	case HCISETAUTH:
 758	case HCISETENCRYPT:
 759	case HCISETPTYPE:
 760	case HCISETLINKPOL:
 761	case HCISETLINKMODE:
 762	case HCISETACLMTU:
 763	case HCISETSCOMTU:
 764		if (!capable(CAP_NET_ADMIN))
 765			return -EPERM;
 766		return hci_dev_cmd(cmd, argp);
 767
 768	case HCIINQUIRY:
 769		return hci_inquiry(argp);
 770	}
 771
 772	lock_sock(sk);
 773
 774	err = hci_sock_bound_ioctl(sk, cmd, arg);
 775
 776done:
 777	release_sock(sk);
 778	return err;
 779}
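
The ioctls dispatched above before re-taking the socket lock (HCIGETDEVLIST, HCIGETDEVINFO, ...) work on any raw HCI socket, bound or not. A minimal sketch enumerating controllers, assuming the BlueZ userspace definitions of HCIGETDEVLIST, struct hci_dev_list_req and HCI_MAX_DEV from <bluetooth/hci.h>:

/* Enumerate registered HCI controllers via HCIGETDEVLIST. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_list_req *dl;
	int sk, i;

	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return 1;
	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(sk, HCIGETDEVLIST, dl) < 0) {
		perror("HCIGETDEVLIST");
		return 1;
	}

	for (i = 0; i < dl->dev_num; i++)
		printf("hci%u\n", (unsigned int)dl->dev_req[i].dev_id);

	free(dl);
	close(sk);
	return 0;
}
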
 780
 781static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 782			 int addr_len)
 783{
 784	struct sockaddr_hci haddr;
 785	struct sock *sk = sock->sk;
 786	struct hci_dev *hdev = NULL;
 787	int len, err = 0;
 788
 789	BT_DBG("sock %p sk %p", sock, sk);
 790
 791	if (!addr)
 792		return -EINVAL;
 793
 794	memset(&haddr, 0, sizeof(haddr));
 795	len = min_t(unsigned int, sizeof(haddr), addr_len);
 796	memcpy(&haddr, addr, len);
 797
 798	if (haddr.hci_family != AF_BLUETOOTH)
 799		return -EINVAL;
 800
 801	lock_sock(sk);
 802
 803	if (sk->sk_state == BT_BOUND) {
 804		err = -EALREADY;
 805		goto done;
 806	}
 807
 808	switch (haddr.hci_channel) {
 809	case HCI_CHANNEL_RAW:
 810		if (hci_pi(sk)->hdev) {
 811			err = -EALREADY;
 812			goto done;
 813		}
 814
 815		if (haddr.hci_dev != HCI_DEV_NONE) {
 816			hdev = hci_dev_get(haddr.hci_dev);
 817			if (!hdev) {
 818				err = -ENODEV;
 819				goto done;
 820			}
 821
 822			atomic_inc(&hdev->promisc);
 823		}
 824
 825		hci_pi(sk)->hdev = hdev;
 826		break;
 827
 828	case HCI_CHANNEL_USER:
 829		if (hci_pi(sk)->hdev) {
 830			err = -EALREADY;
 831			goto done;
 832		}
 833
 834		if (haddr.hci_dev == HCI_DEV_NONE) {
 835			err = -EINVAL;
 836			goto done;
 837		}
 838
 839		if (!capable(CAP_NET_ADMIN)) {
 840			err = -EPERM;
 841			goto done;
 842		}
 843
 844		hdev = hci_dev_get(haddr.hci_dev);
 845		if (!hdev) {
 846			err = -ENODEV;
 847			goto done;
 848		}
 849
 850		if (test_bit(HCI_INIT, &hdev->flags) ||
 851		    hci_dev_test_flag(hdev, HCI_SETUP) ||
 852		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 853		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
 854		     test_bit(HCI_UP, &hdev->flags))) {
 855			err = -EBUSY;
 856			hci_dev_put(hdev);
 857			goto done;
 858		}
 859
 860		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
 861			err = -EUSERS;
 862			hci_dev_put(hdev);
 863			goto done;
 864		}
 865
 866		mgmt_index_removed(hdev);
 867
 868		err = hci_dev_open(hdev->id);
 869		if (err) {
 870			if (err == -EALREADY) {
 871				/* In case the transport is already up and
 872				 * running, clear the error here.
 873				 *
 874			 * This can happen when opening a user
 875				 * channel and HCI_AUTO_OFF grace period
 876				 * is still active.
 877				 */
 878				err = 0;
 879			} else {
 880				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 881				mgmt_index_added(hdev);
 882				hci_dev_put(hdev);
 883				goto done;
 884			}
 885		}
 886
 887		atomic_inc(&hdev->promisc);
 888
 889		hci_pi(sk)->hdev = hdev;
 890		break;
 891
 892	case HCI_CHANNEL_MONITOR:
 893		if (haddr.hci_dev != HCI_DEV_NONE) {
 894			err = -EINVAL;
 895			goto done;
 896		}
 897
 898		if (!capable(CAP_NET_RAW)) {
 899			err = -EPERM;
 900			goto done;
 901		}
 902
 903		/* The monitor interface is restricted to CAP_NET_RAW
 904		 * capabilities and with that implicitly trusted.
 905		 */
 906		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 907
 908		send_monitor_note(sk, "Linux version %s (%s)",
 909				  init_utsname()->release,
 910				  init_utsname()->machine);
 911		send_monitor_note(sk, "Bluetooth subsystem version %s",
 912				  BT_SUBSYS_VERSION);
 913		send_monitor_replay(sk);
 914
 915		atomic_inc(&monitor_promisc);
 916		break;
 917
 918	case HCI_CHANNEL_LOGGING:
 919		if (haddr.hci_dev != HCI_DEV_NONE) {
 920			err = -EINVAL;
 921			goto done;
 922		}
 923
 924		if (!capable(CAP_NET_ADMIN)) {
 925			err = -EPERM;
 926			goto done;
 927		}
 928		break;
 929
 930	default:
 931		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
 932			err = -EINVAL;
 933			goto done;
 934		}
 935
 936		if (haddr.hci_dev != HCI_DEV_NONE) {
 937			err = -EINVAL;
 938			goto done;
 939		}
 940
 941		/* Users with CAP_NET_ADMIN capabilities are allowed
 942		 * access to all management commands and events. For
 943		 * untrusted users the interface is restricted and
 944		 * also only untrusted events are sent.
 945		 */
 946		if (capable(CAP_NET_ADMIN))
 947			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 948
 949		/* At the moment the index and unconfigured index events
 950		 * are enabled unconditionally. Setting them on each
 951		 * socket when binding keeps this functionality. They
 952		 * however might be cleared later and then sending of these
 953		 * events will be disabled, but that is then intentional.
 954		 *
 955		 * This also enables generic events that are safe to be
 956		 * received by untrusted users. Example for such events
 957		 * are changes to settings, class of device, name etc.
 958		 */
 959		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
 960			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
 961			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
 962			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
 963		}
 964		break;
 965	}
 966
 967
 968	hci_pi(sk)->channel = haddr.hci_channel;
 969	sk->sk_state = BT_BOUND;
 970
 971done:
 972	release_sock(sk);
 973	return err;
 974}
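
Binding selects the channel: raw sockets may name a controller or HCI_DEV_NONE, while the monitor channel must be bound with HCI_DEV_NONE and needs CAP_NET_RAW, as enforced above. A sketch of a monitor bind in the style of btmon; struct sockaddr_hci comes from the BlueZ headers, and since HCI_CHANNEL_MONITOR is not part of the exported UAPI, the fallback define below mirrors the kernel value (open_monitor is an illustrative name):

/* Bind an HCI socket to the monitor channel (needs CAP_NET_RAW). */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

#ifndef HCI_CHANNEL_MONITOR
#define HCI_CHANNEL_MONITOR 2	/* mirrors net/bluetooth/hci_sock.h */
#endif

int open_monitor(void)
{
	struct sockaddr_hci addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;		/* monitor sees all controllers */
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return -1;
	}
	/* Each read now returns an hci_mon_hdr-framed packet. */
	return sk;
}
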
 975
 976static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 977			    int *addr_len, int peer)
 978{
 979	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
 980	struct sock *sk = sock->sk;
 981	struct hci_dev *hdev;
 982	int err = 0;
 983
 984	BT_DBG("sock %p sk %p", sock, sk);
 985
 986	if (peer)
 987		return -EOPNOTSUPP;
 988
 989	lock_sock(sk);
 990
 991	hdev = hci_pi(sk)->hdev;
 992	if (!hdev) {
 993		err = -EBADFD;
 994		goto done;
 995	}
 996
 997	*addr_len = sizeof(*haddr);
 998	haddr->hci_family = AF_BLUETOOTH;
 999	haddr->hci_dev    = hdev->id;
1000	haddr->hci_channel = hci_pi(sk)->channel;
1001
1002done:
1003	release_sock(sk);
1004	return err;
1005}
1006
1007static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1008			  struct sk_buff *skb)
1009{
1010	__u32 mask = hci_pi(sk)->cmsg_mask;
1011
1012	if (mask & HCI_CMSG_DIR) {
1013		int incoming = bt_cb(skb)->incoming;
1014		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1015			 &incoming);
1016	}
1017
1018	if (mask & HCI_CMSG_TSTAMP) {
1019#ifdef CONFIG_COMPAT
1020		struct compat_timeval ctv;
1021#endif
1022		struct timeval tv;
1023		void *data;
1024		int len;
1025
1026		skb_get_timestamp(skb, &tv);
1027
1028		data = &tv;
1029		len = sizeof(tv);
1030#ifdef CONFIG_COMPAT
1031		if (!COMPAT_USE_64BIT_TIME &&
1032		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1033			ctv.tv_sec = tv.tv_sec;
1034			ctv.tv_usec = tv.tv_usec;
1035			data = &ctv;
1036			len = sizeof(ctv);
1037		}
1038#endif
1039
1040		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1041	}
1042}
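
The ancillary data attached here shows up on recvmsg() once HCI_DATA_DIR or HCI_TIME_STAMP has been enabled via setsockopt() (see hci_sock_setsockopt() below). A sketch of the receiving side for the direction flag, assuming the BlueZ copies of SOL_HCI and HCI_CMSG_DIR (recv_with_dir is an illustrative name):

/* Receive one packet and report its direction; expects HCI_DATA_DIR
 * to have been enabled on the socket beforehand.
 */
#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

ssize_t recv_with_dir(int sk, void *buf, size_t len, int *incoming)
{
	unsigned char control[64];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;
	ssize_t n;

	n = recvmsg(sk, &msg, 0);
	if (n < 0)
		return n;

	*incoming = -1;	/* unknown unless the cmsg is present */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_HCI && cmsg->cmsg_type == HCI_CMSG_DIR)
			memcpy(incoming, CMSG_DATA(cmsg), sizeof(int));
	}
	return n;
}
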
1043
1044static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1045			    size_t len, int flags)
1046{
1047	int noblock = flags & MSG_DONTWAIT;
1048	struct sock *sk = sock->sk;
1049	struct sk_buff *skb;
1050	int copied, err;
1051
1052	BT_DBG("sock %p, sk %p", sock, sk);
1053
1054	if (flags & MSG_OOB)
1055		return -EOPNOTSUPP;
1056
1057	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1058		return -EOPNOTSUPP;
1059
1060	if (sk->sk_state == BT_CLOSED)
1061		return 0;
1062
1063	skb = skb_recv_datagram(sk, flags, noblock, &err);
1064	if (!skb)
1065		return err;
1066
1067	copied = skb->len;
1068	if (len < copied) {
1069		msg->msg_flags |= MSG_TRUNC;
1070		copied = len;
1071	}
1072
1073	skb_reset_transport_header(skb);
1074	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1075
1076	switch (hci_pi(sk)->channel) {
1077	case HCI_CHANNEL_RAW:
1078		hci_sock_cmsg(sk, msg, skb);
1079		break;
1080	case HCI_CHANNEL_USER:
1081	case HCI_CHANNEL_MONITOR:
1082		sock_recv_timestamp(msg, sk, skb);
1083		break;
1084	default:
1085		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1086			sock_recv_timestamp(msg, sk, skb);
1087		break;
1088	}
1089
1090	skb_free_datagram(sk, skb);
1091
1092	return err ? : copied;
1093}
1094
1095static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1096			struct msghdr *msg, size_t msglen)
1097{
1098	void *buf;
1099	u8 *cp;
1100	struct mgmt_hdr *hdr;
1101	u16 opcode, index, len;
1102	struct hci_dev *hdev = NULL;
1103	const struct hci_mgmt_handler *handler;
1104	bool var_len, no_hdev;
1105	int err;
1106
1107	BT_DBG("got %zu bytes", msglen);
1108
1109	if (msglen < sizeof(*hdr))
1110		return -EINVAL;
1111
1112	buf = kmalloc(msglen, GFP_KERNEL);
1113	if (!buf)
1114		return -ENOMEM;
1115
1116	if (memcpy_from_msg(buf, msg, msglen)) {
1117		err = -EFAULT;
1118		goto done;
1119	}
1120
1121	hdr = buf;
1122	opcode = __le16_to_cpu(hdr->opcode);
1123	index = __le16_to_cpu(hdr->index);
1124	len = __le16_to_cpu(hdr->len);
1125
1126	if (len != msglen - sizeof(*hdr)) {
1127		err = -EINVAL;
1128		goto done;
1129	}
1130
1131	if (opcode >= chan->handler_count ||
1132	    chan->handlers[opcode].func == NULL) {
1133		BT_DBG("Unknown op %u", opcode);
1134		err = mgmt_cmd_status(sk, index, opcode,
1135				      MGMT_STATUS_UNKNOWN_COMMAND);
1136		goto done;
1137	}
1138
1139	handler = &chan->handlers[opcode];
1140
1141	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1142	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1143		err = mgmt_cmd_status(sk, index, opcode,
1144				      MGMT_STATUS_PERMISSION_DENIED);
1145		goto done;
1146	}
1147
1148	if (index != MGMT_INDEX_NONE) {
1149		hdev = hci_dev_get(index);
1150		if (!hdev) {
1151			err = mgmt_cmd_status(sk, index, opcode,
1152					      MGMT_STATUS_INVALID_INDEX);
1153			goto done;
1154		}
1155
1156		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1157		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1158		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1159			err = mgmt_cmd_status(sk, index, opcode,
1160					      MGMT_STATUS_INVALID_INDEX);
1161			goto done;
1162		}
1163
1164		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1165		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1166			err = mgmt_cmd_status(sk, index, opcode,
1167					      MGMT_STATUS_INVALID_INDEX);
1168			goto done;
1169		}
1170	}
1171
1172	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1173	if (no_hdev != !hdev) {
1174		err = mgmt_cmd_status(sk, index, opcode,
1175				      MGMT_STATUS_INVALID_INDEX);
1176		goto done;
1177	}
1178
1179	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1180	if ((var_len && len < handler->data_len) ||
1181	    (!var_len && len != handler->data_len)) {
1182		err = mgmt_cmd_status(sk, index, opcode,
1183				      MGMT_STATUS_INVALID_PARAMS);
1184		goto done;
1185	}
1186
1187	if (hdev && chan->hdev_init)
1188		chan->hdev_init(sk, hdev);
1189
1190	cp = buf + sizeof(*hdr);
1191
1192	err = handler->func(sk, hdev, cp, len);
1193	if (err < 0)
1194		goto done;
1195
1196	err = msglen;
1197
1198done:
1199	if (hdev)
1200		hci_dev_put(hdev);
1201
1202	kfree(buf);
1203	return err;
1204}
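
Every management command starts with the 6-byte header validated above: opcode, controller index and parameter length, all little endian (struct mgmt_hdr). A sketch of the smallest command, MGMT_OP_READ_VERSION (0x0001) with MGMT_INDEX_NONE (0xffff), written to a socket already bound to HCI_CHANNEL_CONTROL (mgmt_read_version is an illustrative name):

/* Send MGMT_OP_READ_VERSION on a control-channel socket. */
#include <stdint.h>
#include <unistd.h>

int mgmt_read_version(int sk)
{
	uint8_t hdr[6];

	/* opcode 0x0001, index 0xffff (MGMT_INDEX_NONE), param len 0 */
	hdr[0] = 0x01; hdr[1] = 0x00;
	hdr[2] = 0xff; hdr[3] = 0xff;
	hdr[4] = 0x00; hdr[5] = 0x00;

	return write(sk, hdr, sizeof(hdr)) == sizeof(hdr) ? 0 : -1;
}
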
1205
1206static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1207{
1208	struct hci_mon_hdr *hdr;
1209	struct sk_buff *skb;
1210	struct hci_dev *hdev;
1211	u16 index;
1212	int err;
1213
1214	/* The logging frame consists at minimum of the standard header,
1215	 * the priority byte, the ident length byte and at least one string
1216	 * terminator NUL byte. Anything shorter is an invalid packet.
1217	 */
1218	if (len < sizeof(*hdr) + 3)
1219		return -EINVAL;
1220
1221	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1222	if (!skb)
1223		return err;
1224
1225	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1226		err = -EFAULT;
1227		goto drop;
1228	}
1229
1230	hdr = (void *)skb->data;
1231
1232	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1233		err = -EINVAL;
1234		goto drop;
1235	}
1236
1237	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1238		__u8 priority = skb->data[sizeof(*hdr)];
1239		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1240
1241		/* Only the priorities 0-7 are valid and with that any other
1242		 * value results in an invalid packet.
1243		 *
1244		 * The priority byte is followed by an ident length byte and
1245		 * the NUL terminated ident string. Check that the ident
1246		 * length is not overflowing the packet and also that the
1247		 * ident string itself is NUL terminated. In case the ident
1248		 * length is zero, the length value actually doubles as NUL
1249		 * terminator identifier.
1250		 *
1251		 * The message follows the ident string (if present) and
1252		 * must be NUL terminated. Otherwise it is not a valid packet.
1253		 */
1254		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1255		    ident_len > len - sizeof(*hdr) - 3 ||
1256		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1257			err = -EINVAL;
1258			goto drop;
1259		}
1260	} else {
1261		err = -EINVAL;
1262		goto drop;
1263	}
1264
1265	index = __le16_to_cpu(hdr->index);
1266
1267	if (index != MGMT_INDEX_NONE) {
1268		hdev = hci_dev_get(index);
1269		if (!hdev) {
1270			err = -ENODEV;
1271			goto drop;
1272		}
1273	} else {
1274		hdev = NULL;
1275	}
1276
1277	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1278
1279	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1280	err = len;
1281
1282	if (hdev)
1283		hci_dev_put(hdev);
1284
1285drop:
1286	kfree_skb(skb);
1287	return err;
1288}
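
Putting the checks above together, a valid logging frame is: an hci_mon_hdr with opcode 0x0000 and a controller index (or 0xffff for none), a priority byte (0-7), an ident length byte that counts the ident string including its NUL, the ident itself, then a NUL-terminated message. A sketch that builds one for a socket bound to HCI_CHANNEL_LOGGING (send_log and its arguments are illustrative):

/* Build and send one logging frame as hci_logging_frame() expects. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int send_log(int sk, uint8_t priority, const char *ident, const char *msg)
{
	uint8_t frame[512];
	size_t ident_len = strlen(ident) + 1;	/* counted with its NUL */
	size_t msg_len = strlen(msg) + 1;	/* message is NUL terminated */
	size_t len = 6 + 2 + ident_len + msg_len;
	uint16_t payload;

	if (priority > 7 || ident_len > 255 || len > sizeof(frame))
		return -1;
	payload = (uint16_t)(len - 6);

	/* hci_mon_hdr: opcode 0x0000, index 0xffff (no controller), len */
	frame[0] = 0x00; frame[1] = 0x00;
	frame[2] = 0xff; frame[3] = 0xff;
	frame[4] = payload & 0xff; frame[5] = payload >> 8;

	frame[6] = priority;
	frame[7] = (uint8_t)ident_len;
	memcpy(frame + 8, ident, ident_len);
	memcpy(frame + 8 + ident_len, msg, msg_len);

	return write(sk, frame, len) == (ssize_t)len ? 0 : -1;
}
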
1289
1290static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1291			    size_t len)
1292{
1293	struct sock *sk = sock->sk;
1294	struct hci_mgmt_chan *chan;
1295	struct hci_dev *hdev;
1296	struct sk_buff *skb;
1297	int err;
1298
1299	BT_DBG("sock %p sk %p", sock, sk);
1300
1301	if (msg->msg_flags & MSG_OOB)
1302		return -EOPNOTSUPP;
1303
1304	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1305		return -EINVAL;
1306
1307	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1308		return -EINVAL;
1309
1310	lock_sock(sk);
1311
1312	switch (hci_pi(sk)->channel) {
1313	case HCI_CHANNEL_RAW:
1314	case HCI_CHANNEL_USER:
1315		break;
1316	case HCI_CHANNEL_MONITOR:
1317		err = -EOPNOTSUPP;
1318		goto done;
1319	case HCI_CHANNEL_LOGGING:
1320		err = hci_logging_frame(sk, msg, len);
1321		goto done;
1322	default:
1323		mutex_lock(&mgmt_chan_list_lock);
1324		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1325		if (chan)
1326			err = hci_mgmt_cmd(chan, sk, msg, len);
1327		else
1328			err = -EINVAL;
1329
1330		mutex_unlock(&mgmt_chan_list_lock);
1331		goto done;
1332	}
1333
1334	hdev = hci_pi(sk)->hdev;
1335	if (!hdev) {
1336		err = -EBADFD;
1337		goto done;
1338	}
1339
1340	if (!test_bit(HCI_UP, &hdev->flags)) {
1341		err = -ENETDOWN;
1342		goto done;
1343	}
1344
1345	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1346	if (!skb)
1347		goto done;
1348
1349	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1350		err = -EFAULT;
1351		goto drop;
1352	}
1353
1354	hci_skb_pkt_type(skb) = skb->data[0];
1355	skb_pull(skb, 1);
1356
1357	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1358		/* No permission check is needed for user channel
1359		 * since that gets enforced when binding the socket.
1360		 *
1361		 * However check that the packet type is valid.
1362		 */
1363		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1364		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1365		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1366			err = -EINVAL;
1367			goto drop;
1368		}
1369
1370		skb_queue_tail(&hdev->raw_q, skb);
1371		queue_work(hdev->workqueue, &hdev->tx_work);
1372	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1373		u16 opcode = get_unaligned_le16(skb->data);
1374		u16 ogf = hci_opcode_ogf(opcode);
1375		u16 ocf = hci_opcode_ocf(opcode);
1376
1377		if (((ogf > HCI_SFLT_MAX_OGF) ||
1378		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1379				   &hci_sec_filter.ocf_mask[ogf])) &&
1380		    !capable(CAP_NET_RAW)) {
1381			err = -EPERM;
1382			goto drop;
1383		}
1384
1385		/* Since the opcode has already been extracted here, store
1386		 * a copy of the value for later use by the drivers.
1387		 */
1388		hci_skb_opcode(skb) = opcode;
1389
1390		if (ogf == 0x3f) {
1391			skb_queue_tail(&hdev->raw_q, skb);
1392			queue_work(hdev->workqueue, &hdev->tx_work);
1393		} else {
1394			/* Stand-alone HCI commands must be flagged as
1395			 * single-command requests.
1396			 */
1397			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1398
1399			skb_queue_tail(&hdev->cmd_q, skb);
1400			queue_work(hdev->workqueue, &hdev->cmd_work);
1401		}
1402	} else {
1403		if (!capable(CAP_NET_RAW)) {
1404			err = -EPERM;
1405			goto drop;
1406		}
1407
1408		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1409		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1410			err = -EINVAL;
1411			goto drop;
1412		}
1413
1414		skb_queue_tail(&hdev->raw_q, skb);
1415		queue_work(hdev->workqueue, &hdev->tx_work);
1416	}
1417
1418	err = len;
1419
1420done:
1421	release_sock(sk);
1422	return err;
1423
1424drop:
1425	kfree_skb(skb);
1426	goto done;
1427}
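
On the raw channel the first written byte is the packet type; for commands the little-endian opcode follows, and any opcode whose bit is not set in the hci_sec_filter table requires CAP_NET_RAW. A sketch sending HCI Reset (OGF 0x03, OCF 0x0003, opcode 0x0c03) on a raw socket bound to a controller; per the OGF_HOST_CTL mask above, Reset is one of the privileged commands (send_hci_reset is an illustrative name):

/* Frame: packet type byte, opcode (little endian), parameter length. */
#include <stdint.h>
#include <unistd.h>

int send_hci_reset(int sk)
{
	/* 0x01 = HCI_COMMAND_PKT, opcode 0x0c03, parameter length 0 */
	static const uint8_t cmd[] = { 0x01, 0x03, 0x0c, 0x00 };

	return write(sk, cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
}
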
1428
1429static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1430			       char __user *optval, unsigned int len)
1431{
1432	struct hci_ufilter uf = { .opcode = 0 };
1433	struct sock *sk = sock->sk;
1434	int err = 0, opt = 0;
1435
1436	BT_DBG("sk %p, opt %d", sk, optname);
1437
1438	lock_sock(sk);
1439
1440	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1441		err = -EBADFD;
1442		goto done;
1443	}
1444
1445	switch (optname) {
1446	case HCI_DATA_DIR:
1447		if (get_user(opt, (int __user *)optval)) {
1448			err = -EFAULT;
1449			break;
1450		}
1451
1452		if (opt)
1453			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1454		else
1455			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1456		break;
1457
1458	case HCI_TIME_STAMP:
1459		if (get_user(opt, (int __user *)optval)) {
1460			err = -EFAULT;
1461			break;
1462		}
1463
1464		if (opt)
1465			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1466		else
1467			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1468		break;
1469
1470	case HCI_FILTER:
1471		{
1472			struct hci_filter *f = &hci_pi(sk)->filter;
1473
1474			uf.type_mask = f->type_mask;
1475			uf.opcode    = f->opcode;
1476			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1477			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1478		}
1479
1480		len = min_t(unsigned int, len, sizeof(uf));
1481		if (copy_from_user(&uf, optval, len)) {
1482			err = -EFAULT;
1483			break;
1484		}
1485
1486		if (!capable(CAP_NET_RAW)) {
1487			uf.type_mask &= hci_sec_filter.type_mask;
1488			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1489			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1490		}
1491
1492		{
1493			struct hci_filter *f = &hci_pi(sk)->filter;
1494
1495			f->type_mask = uf.type_mask;
1496			f->opcode    = uf.opcode;
1497			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1498			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1499		}
1500		break;
1501
1502	default:
1503		err = -ENOPROTOOPT;
1504		break;
1505	}
1506
1507done:
1508	release_sock(sk);
1509	return err;
1510}
1511
1512static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1513			       char __user *optval, int __user *optlen)
1514{
1515	struct hci_ufilter uf;
1516	struct sock *sk = sock->sk;
1517	int len, opt, err = 0;
1518
1519	BT_DBG("sk %p, opt %d", sk, optname);
1520
1521	if (get_user(len, optlen))
1522		return -EFAULT;
1523
1524	lock_sock(sk);
1525
1526	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1527		err = -EBADFD;
1528		goto done;
1529	}
1530
1531	switch (optname) {
1532	case HCI_DATA_DIR:
1533		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1534			opt = 1;
1535		else
1536			opt = 0;
1537
1538		if (put_user(opt, optval))
1539			err = -EFAULT;
1540		break;
1541
1542	case HCI_TIME_STAMP:
1543		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1544			opt = 1;
1545		else
1546			opt = 0;
1547
1548		if (put_user(opt, optval))
1549			err = -EFAULT;
1550		break;
1551
1552	case HCI_FILTER:
1553		{
1554			struct hci_filter *f = &hci_pi(sk)->filter;
1555
1556			memset(&uf, 0, sizeof(uf));
1557			uf.type_mask = f->type_mask;
1558			uf.opcode    = f->opcode;
1559			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1560			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1561		}
1562
1563		len = min_t(unsigned int, len, sizeof(uf));
1564		if (copy_to_user(optval, &uf, len))
1565			err = -EFAULT;
1566		break;
1567
1568	default:
1569		err = -ENOPROTOOPT;
1570		break;
1571	}
1572
1573done:
1574	release_sock(sk);
1575	return err;
1576}
1577
1578static const struct proto_ops hci_sock_ops = {
1579	.family		= PF_BLUETOOTH,
1580	.owner		= THIS_MODULE,
1581	.release	= hci_sock_release,
1582	.bind		= hci_sock_bind,
1583	.getname	= hci_sock_getname,
1584	.sendmsg	= hci_sock_sendmsg,
1585	.recvmsg	= hci_sock_recvmsg,
1586	.ioctl		= hci_sock_ioctl,
1587	.poll		= datagram_poll,
1588	.listen		= sock_no_listen,
1589	.shutdown	= sock_no_shutdown,
1590	.setsockopt	= hci_sock_setsockopt,
1591	.getsockopt	= hci_sock_getsockopt,
1592	.connect	= sock_no_connect,
1593	.socketpair	= sock_no_socketpair,
1594	.accept		= sock_no_accept,
1595	.mmap		= sock_no_mmap
1596};
1597
1598static struct proto hci_sk_proto = {
1599	.name		= "HCI",
1600	.owner		= THIS_MODULE,
1601	.obj_size	= sizeof(struct hci_pinfo)
1602};
1603
1604static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1605			   int kern)
1606{
1607	struct sock *sk;
1608
1609	BT_DBG("sock %p", sock);
1610
1611	if (sock->type != SOCK_RAW)
1612		return -ESOCKTNOSUPPORT;
1613
1614	sock->ops = &hci_sock_ops;
1615
1616	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1617	if (!sk)
1618		return -ENOMEM;
1619
1620	sock_init_data(sock, sk);
1621
1622	sock_reset_flag(sk, SOCK_ZAPPED);
1623
1624	sk->sk_protocol = protocol;
1625
1626	sock->state = SS_UNCONNECTED;
1627	sk->sk_state = BT_OPEN;
1628
1629	bt_sock_link(&hci_sk_list, sk);
1630	return 0;
1631}
1632
1633static const struct net_proto_family hci_sock_family_ops = {
1634	.family	= PF_BLUETOOTH,
1635	.owner	= THIS_MODULE,
1636	.create	= hci_sock_create,
1637};
1638
1639int __init hci_sock_init(void)
1640{
1641	int err;
1642
1643	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1644
1645	err = proto_register(&hci_sk_proto, 0);
1646	if (err < 0)
1647		return err;
1648
1649	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1650	if (err < 0) {
1651		BT_ERR("HCI socket registration failed");
1652		goto error;
1653	}
1654
1655	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1656	if (err < 0) {
1657		BT_ERR("Failed to create HCI proc file");
1658		bt_sock_unregister(BTPROTO_HCI);
1659		goto error;
1660	}
1661
1662	BT_INFO("HCI socket layer initialized");
1663
1664	return 0;
1665
1666error:
1667	proto_unregister(&hci_sk_proto);
1668	return err;
1669}
1670
1671void hci_sock_cleanup(void)
1672{
1673	bt_procfs_cleanup(&init_net, "hci");
1674	bt_sock_unregister(BTPROTO_HCI);
1675	proto_unregister(&hci_sk_proto);
1676}
v6.2
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26#include <linux/compat.h>
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u8              cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60	__u16             mtu;
  61};
  62
  63static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
  64{
  65	struct hci_dev *hdev = hci_pi(sk)->hdev;
  66
  67	if (!hdev)
  68		return ERR_PTR(-EBADFD);
  69	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
  70		return ERR_PTR(-EPIPE);
  71	return hdev;
  72}
  73
  74void hci_sock_set_flag(struct sock *sk, int nr)
  75{
  76	set_bit(nr, &hci_pi(sk)->flags);
  77}
  78
  79void hci_sock_clear_flag(struct sock *sk, int nr)
  80{
  81	clear_bit(nr, &hci_pi(sk)->flags);
  82}
  83
  84int hci_sock_test_flag(struct sock *sk, int nr)
  85{
  86	return test_bit(nr, &hci_pi(sk)->flags);
  87}
  88
  89unsigned short hci_sock_get_channel(struct sock *sk)
  90{
  91	return hci_pi(sk)->channel;
  92}
  93
  94u32 hci_sock_get_cookie(struct sock *sk)
  95{
  96	return hci_pi(sk)->cookie;
  97}
  98
  99static bool hci_sock_gen_cookie(struct sock *sk)
 100{
 101	int id = hci_pi(sk)->cookie;
 102
 103	if (!id) {
 104		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
 105		if (id < 0)
 106			id = 0xffffffff;
 107
 108		hci_pi(sk)->cookie = id;
 109		get_task_comm(hci_pi(sk)->comm, current);
 110		return true;
 111	}
 112
 113	return false;
 114}
 115
 116static void hci_sock_free_cookie(struct sock *sk)
 117{
 118	int id = hci_pi(sk)->cookie;
 119
 120	if (id) {
 121		hci_pi(sk)->cookie = 0xffffffff;
 122		ida_simple_remove(&sock_cookie_ida, id);
 123	}
 124}
 125
 126static inline int hci_test_bit(int nr, const void *addr)
 127{
 128	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 129}
 130
 131/* Security filter */
 132#define HCI_SFLT_MAX_OGF  5
 133
 134struct hci_sec_filter {
 135	__u32 type_mask;
 136	__u32 event_mask[2];
 137	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 138};
 139
 140static const struct hci_sec_filter hci_sec_filter = {
 141	/* Packet types */
 142	0x10,
 143	/* Events */
 144	{ 0x1000d9fe, 0x0000b00c },
 145	/* Commands */
 146	{
 147		{ 0x0 },
 148		/* OGF_LINK_CTL */
 149		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 150		/* OGF_LINK_POLICY */
 151		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 152		/* OGF_HOST_CTL */
 153		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 154		/* OGF_INFO_PARAM */
 155		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 156		/* OGF_STATUS_PARAM */
 157		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 158	}
 159};
 160
 161static struct bt_sock_list hci_sk_list = {
 162	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 163};
 164
 165static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 166{
 167	struct hci_filter *flt;
 168	int flt_type, flt_event;
 169
 170	/* Apply filter */
 171	flt = &hci_pi(sk)->filter;
 172
 173	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 174
 175	if (!test_bit(flt_type, &flt->type_mask))
 176		return true;
 177
 178	/* Extra filter for event packets only */
 179	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 180		return false;
 181
 182	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 183
 184	if (!hci_test_bit(flt_event, &flt->event_mask))
 185		return true;
 186
 187	/* Check filter only when opcode is set */
 188	if (!flt->opcode)
 189		return false;
 190
 191	if (flt_event == HCI_EV_CMD_COMPLETE &&
 192	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 193		return true;
 194
 195	if (flt_event == HCI_EV_CMD_STATUS &&
 196	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 197		return true;
 198
 199	return false;
 200}
 201
 202/* Send frame to RAW socket */
 203void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 204{
 205	struct sock *sk;
 206	struct sk_buff *skb_copy = NULL;
 207
 208	BT_DBG("hdev %p len %d", hdev, skb->len);
 209
 210	read_lock(&hci_sk_list.lock);
 211
 212	sk_for_each(sk, &hci_sk_list.head) {
 213		struct sk_buff *nskb;
 214
 215		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 216			continue;
 217
 218		/* Don't send frame to the socket it came from */
 219		if (skb->sk == sk)
 220			continue;
 221
 222		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 223			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 224			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 225			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 226			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 227			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 228				continue;
 229			if (is_filtered_packet(sk, skb))
 230				continue;
 231		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 232			if (!bt_cb(skb)->incoming)
 233				continue;
 234			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 235			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 236			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 237			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 238				continue;
 239		} else {
 240			/* Don't send frame to other channel types */
 241			continue;
 242		}
 243
 244		if (!skb_copy) {
 245			/* Create a private copy with headroom */
 246			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 247			if (!skb_copy)
 248				continue;
 249
 250			/* Put type byte before the data */
 251			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 252		}
 253
 254		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 255		if (!nskb)
 256			continue;
 257
 258		if (sock_queue_rcv_skb(sk, nskb))
 259			kfree_skb(nskb);
 260	}
 261
 262	read_unlock(&hci_sk_list.lock);
 263
 264	kfree_skb(skb_copy);
 265}
 266
 267/* Send frame to sockets with specific channel */
 268static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 269				  int flag, struct sock *skip_sk)
 270{
 271	struct sock *sk;
 272
 273	BT_DBG("channel %u len %d", channel, skb->len);
 274
 
 
 275	sk_for_each(sk, &hci_sk_list.head) {
 276		struct sk_buff *nskb;
 277
 278		/* Ignore socket without the flag set */
 279		if (!hci_sock_test_flag(sk, flag))
 280			continue;
 281
 282		/* Skip the original socket */
 283		if (sk == skip_sk)
 284			continue;
 285
 286		if (sk->sk_state != BT_BOUND)
 287			continue;
 288
 289		if (hci_pi(sk)->channel != channel)
 290			continue;
 291
 292		nskb = skb_clone(skb, GFP_ATOMIC);
 293		if (!nskb)
 294			continue;
 295
 296		if (sock_queue_rcv_skb(sk, nskb))
 297			kfree_skb(nskb);
 298	}
 299
 300}
 301
 302void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 303			 int flag, struct sock *skip_sk)
 304{
 305	read_lock(&hci_sk_list.lock);
 306	__hci_send_to_channel(channel, skb, flag, skip_sk);
 307	read_unlock(&hci_sk_list.lock);
 308}
 309
 310/* Send frame to monitor socket */
 311void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 312{
 313	struct sk_buff *skb_copy = NULL;
 314	struct hci_mon_hdr *hdr;
 315	__le16 opcode;
 316
 317	if (!atomic_read(&monitor_promisc))
 318		return;
 319
 320	BT_DBG("hdev %p len %d", hdev, skb->len);
 321
 322	switch (hci_skb_pkt_type(skb)) {
 323	case HCI_COMMAND_PKT:
 324		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 325		break;
 326	case HCI_EVENT_PKT:
 327		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 328		break;
 329	case HCI_ACLDATA_PKT:
 330		if (bt_cb(skb)->incoming)
 331			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 332		else
 333			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 334		break;
 335	case HCI_SCODATA_PKT:
 336		if (bt_cb(skb)->incoming)
 337			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 338		else
 339			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 340		break;
 341	case HCI_ISODATA_PKT:
 342		if (bt_cb(skb)->incoming)
 343			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
 344		else
 345			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
 346		break;
 347	case HCI_DIAG_PKT:
 348		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 349		break;
 350	default:
 351		return;
 352	}
 353
 354	/* Create a private copy with headroom */
 355	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 356	if (!skb_copy)
 357		return;
 358
 359	/* Put header before the data */
 360	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 361	hdr->opcode = opcode;
 362	hdr->index = cpu_to_le16(hdev->id);
 363	hdr->len = cpu_to_le16(skb->len);
 364
 365	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 366			    HCI_SOCK_TRUSTED, NULL);
 367	kfree_skb(skb_copy);
 368}
 369
 370void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 371				 void *data, u16 data_len, ktime_t tstamp,
 372				 int flag, struct sock *skip_sk)
 373{
 374	struct sock *sk;
 375	__le16 index;
 376
 377	if (hdev)
 378		index = cpu_to_le16(hdev->id);
 379	else
 380		index = cpu_to_le16(MGMT_INDEX_NONE);
 381
 382	read_lock(&hci_sk_list.lock);
 383
 384	sk_for_each(sk, &hci_sk_list.head) {
 385		struct hci_mon_hdr *hdr;
 386		struct sk_buff *skb;
 387
 388		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 389			continue;
 390
 391		/* Ignore socket without the flag set */
 392		if (!hci_sock_test_flag(sk, flag))
 393			continue;
 394
 395		/* Skip the original socket */
 396		if (sk == skip_sk)
 397			continue;
 398
 399		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 400		if (!skb)
 401			continue;
 402
 403		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 404		put_unaligned_le16(event, skb_put(skb, 2));
 405
 406		if (data)
 407			skb_put_data(skb, data, data_len);
 408
 409		skb->tstamp = tstamp;
 410
 411		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 412		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 413		hdr->index = index;
 414		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 415
 416		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 417				      HCI_SOCK_TRUSTED, NULL);
 418		kfree_skb(skb);
 419	}
 420
 421	read_unlock(&hci_sk_list.lock);
 422}
 423
 424static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 425{
 426	struct hci_mon_hdr *hdr;
 427	struct hci_mon_new_index *ni;
 428	struct hci_mon_index_info *ii;
 429	struct sk_buff *skb;
 430	__le16 opcode;
 431
 432	switch (event) {
 433	case HCI_DEV_REG:
 434		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 435		if (!skb)
 436			return NULL;
 437
 438		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 439		ni->type = hdev->dev_type;
 440		ni->bus = hdev->bus;
 441		bacpy(&ni->bdaddr, &hdev->bdaddr);
 442		memcpy(ni->name, hdev->name, 8);
 443
 444		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 445		break;
 446
 447	case HCI_DEV_UNREG:
 448		skb = bt_skb_alloc(0, GFP_ATOMIC);
 449		if (!skb)
 450			return NULL;
 451
 452		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 453		break;
 454
 455	case HCI_DEV_SETUP:
 456		if (hdev->manufacturer == 0xffff)
 457			return NULL;
 458		fallthrough;
 
 459
 460	case HCI_DEV_UP:
 461		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 462		if (!skb)
 463			return NULL;
 464
 465		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 466		bacpy(&ii->bdaddr, &hdev->bdaddr);
 467		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 468
 469		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 470		break;
 471
 472	case HCI_DEV_OPEN:
 473		skb = bt_skb_alloc(0, GFP_ATOMIC);
 474		if (!skb)
 475			return NULL;
 476
 477		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 478		break;
 479
 480	case HCI_DEV_CLOSE:
 481		skb = bt_skb_alloc(0, GFP_ATOMIC);
 482		if (!skb)
 483			return NULL;
 484
 485		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 486		break;
 487
 488	default:
 489		return NULL;
 490	}
 491
 492	__net_timestamp(skb);
 493
 494	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 495	hdr->opcode = opcode;
 496	hdr->index = cpu_to_le16(hdev->id);
 497	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 498
 499	return skb;
 500}
 501
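/* The CTRL_OPEN payload built below occupies 14 + TASK_COMM_LEN octets:
 * socket cookie (4, LE), format (2, LE), version (3), flags (4, LE),
 * comm length (1, always TASK_COMM_LEN) and the task comm string.
 */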
 502static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 503{
 504	struct hci_mon_hdr *hdr;
 505	struct sk_buff *skb;
 506	u16 format;
 507	u8 ver[3];
 508	u32 flags;
 509
 510	/* No message needed when cookie is not present */
 511	if (!hci_pi(sk)->cookie)
 512		return NULL;
 513
 514	switch (hci_pi(sk)->channel) {
 515	case HCI_CHANNEL_RAW:
 516		format = 0x0000;
 517		ver[0] = BT_SUBSYS_VERSION;
 518		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 519		break;
 520	case HCI_CHANNEL_USER:
 521		format = 0x0001;
 522		ver[0] = BT_SUBSYS_VERSION;
 523		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 524		break;
 525	case HCI_CHANNEL_CONTROL:
 526		format = 0x0002;
 527		mgmt_fill_version_info(ver);
 528		break;
 529	default:
 530		/* No message for unsupported format */
 531		return NULL;
 532	}
 533
 534	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 535	if (!skb)
 536		return NULL;
 537
 538	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 539
 540	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 541	put_unaligned_le16(format, skb_put(skb, 2));
 542	skb_put_data(skb, ver, sizeof(ver));
 543	put_unaligned_le32(flags, skb_put(skb, 4));
 544	skb_put_u8(skb, TASK_COMM_LEN);
 545	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
 546
 547	__net_timestamp(skb);
 548
 549	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 550	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 551	if (hci_pi(sk)->hdev)
 552		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 553	else
 554		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 555	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 556
 557	return skb;
 558}
 559
 560static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 561{
 562	struct hci_mon_hdr *hdr;
 563	struct sk_buff *skb;
 564
 565	/* No message needed when cookie is not present */
 566	if (!hci_pi(sk)->cookie)
 567		return NULL;
 568
 569	switch (hci_pi(sk)->channel) {
 570	case HCI_CHANNEL_RAW:
 571	case HCI_CHANNEL_USER:
 572	case HCI_CHANNEL_CONTROL:
 573		break;
 574	default:
 575		/* No message for unsupported format */
 576		return NULL;
 577	}
 578
 579	skb = bt_skb_alloc(4, GFP_ATOMIC);
 580	if (!skb)
 581		return NULL;
 582
 583	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 584
 585	__net_timestamp(skb);
 586
 587	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 588	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 589	if (hci_pi(sk)->hdev)
 590		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 591	else
 592		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 593	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 594
 595	return skb;
 596}
 597
 598static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 599						   u16 opcode, u16 len,
 600						   const void *buf)
 601{
 602	struct hci_mon_hdr *hdr;
 603	struct sk_buff *skb;
 604
 605	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 606	if (!skb)
 607		return NULL;
 608
 609	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 610	put_unaligned_le16(opcode, skb_put(skb, 2));
 611
 612	if (buf)
 613		skb_put_data(skb, buf, len);
 614
 615	__net_timestamp(skb);
 616
 617	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 618	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 619	hdr->index = cpu_to_le16(index);
 620	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 621
 622	return skb;
 623}
 624
 625static void __printf(2, 3)
 626send_monitor_note(struct sock *sk, const char *fmt, ...)
 627{
 628	size_t len;
 629	struct hci_mon_hdr *hdr;
 630	struct sk_buff *skb;
 631	va_list args;
 632
 633	va_start(args, fmt);
 634	len = vsnprintf(NULL, 0, fmt, args);
 635	va_end(args);
 636
 637	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 638	if (!skb)
 639		return;
 640
 641	va_start(args, fmt);
 642	vsprintf(skb_put(skb, len), fmt, args);
 643	*(u8 *)skb_put(skb, 1) = 0;
 644	va_end(args);
 645
 646	__net_timestamp(skb);
 647
 648	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 649	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 650	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 651	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 652
 653	if (sock_queue_rcv_skb(sk, skb))
 654		kfree_skb(skb);
 655}
 656
 657static void send_monitor_replay(struct sock *sk)
 658{
 659	struct hci_dev *hdev;
 660
 661	read_lock(&hci_dev_list_lock);
 662
 663	list_for_each_entry(hdev, &hci_dev_list, list) {
 664		struct sk_buff *skb;
 665
 666		skb = create_monitor_event(hdev, HCI_DEV_REG);
 667		if (!skb)
 668			continue;
 669
 670		if (sock_queue_rcv_skb(sk, skb))
 671			kfree_skb(skb);
 672
 673		if (!test_bit(HCI_RUNNING, &hdev->flags))
 674			continue;
 675
 676		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 677		if (!skb)
 678			continue;
 679
 680		if (sock_queue_rcv_skb(sk, skb))
 681			kfree_skb(skb);
 682
 683		if (test_bit(HCI_UP, &hdev->flags))
 684			skb = create_monitor_event(hdev, HCI_DEV_UP);
 685		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 686			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 687		else
 688			skb = NULL;
 689
 690		if (skb) {
 691			if (sock_queue_rcv_skb(sk, skb))
 692				kfree_skb(skb);
 693		}
 694	}
 695
 696	read_unlock(&hci_dev_list_lock);
 697}
 698
 699static void send_monitor_control_replay(struct sock *mon_sk)
 700{
 701	struct sock *sk;
 702
 703	read_lock(&hci_sk_list.lock);
 704
 705	sk_for_each(sk, &hci_sk_list.head) {
 706		struct sk_buff *skb;
 707
 708		skb = create_monitor_ctrl_open(sk);
 709		if (!skb)
 710			continue;
 711
 712		if (sock_queue_rcv_skb(mon_sk, skb))
 713			kfree_skb(skb);
 714	}
 715
 716	read_unlock(&hci_sk_list.lock);
 717}
 718
 719/* Generate internal stack event */
 720static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 721{
 722	struct hci_event_hdr *hdr;
 723	struct hci_ev_stack_internal *ev;
 724	struct sk_buff *skb;
 725
 726	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 727	if (!skb)
 728		return;
 729
 730	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 731	hdr->evt  = HCI_EV_STACK_INTERNAL;
 732	hdr->plen = sizeof(*ev) + dlen;
 733
 734	ev = skb_put(skb, sizeof(*ev) + dlen);
 735	ev->type = type;
 736	memcpy(ev->data, data, dlen);
 737
 738	bt_cb(skb)->incoming = 1;
 739	__net_timestamp(skb);
 740
 741	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 742	hci_send_to_sock(hdev, skb);
 743	kfree_skb(skb);
 744}
 745
 746void hci_sock_dev_event(struct hci_dev *hdev, int event)
 747{
 748	BT_DBG("hdev %s event %d", hdev->name, event);
 749
 750	if (atomic_read(&monitor_promisc)) {
 751		struct sk_buff *skb;
 752
 753		/* Send event to monitor */
 754		skb = create_monitor_event(hdev, event);
 755		if (skb) {
 756			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 757					    HCI_SOCK_TRUSTED, NULL);
 758			kfree_skb(skb);
 759		}
 760	}
 761
 762	if (event <= HCI_DEV_DOWN) {
 763		struct hci_ev_si_device ev;
 764
 765		/* Send event to sockets */
 766		ev.event  = event;
 767		ev.dev_id = hdev->id;
 768		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 769	}
 770
 771	if (event == HCI_DEV_UNREG) {
 772		struct sock *sk;
 773
 774		/* Wake up sockets using this dead device */
 775		read_lock(&hci_sk_list.lock);
 776		sk_for_each(sk, &hci_sk_list.head) {
 777			if (hci_pi(sk)->hdev == hdev) {
 778				sk->sk_err = EPIPE;
 779				sk->sk_state_change(sk);
 780			}
 781		}
 782		read_unlock(&hci_sk_list.lock);
 783	}
 784}
 785
 786static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 787{
 788	struct hci_mgmt_chan *c;
 789
 790	list_for_each_entry(c, &mgmt_chan_list, list) {
 791		if (c->channel == channel)
 792			return c;
 793	}
 794
 795	return NULL;
 796}
 797
 798static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 799{
 800	struct hci_mgmt_chan *c;
 801
 802	mutex_lock(&mgmt_chan_list_lock);
 803	c = __hci_mgmt_chan_find(channel);
 804	mutex_unlock(&mgmt_chan_list_lock);
 805
 806	return c;
 807}
 808
 809int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 810{
 811	if (c->channel < HCI_CHANNEL_CONTROL)
 812		return -EINVAL;
 813
 814	mutex_lock(&mgmt_chan_list_lock);
 815	if (__hci_mgmt_chan_find(c->channel)) {
 816		mutex_unlock(&mgmt_chan_list_lock);
 817		return -EALREADY;
 818	}
 819
 820	list_add_tail(&c->list, &mgmt_chan_list);
 821
 822	mutex_unlock(&mgmt_chan_list_lock);
 823
 824	return 0;
 825}
 826EXPORT_SYMBOL(hci_mgmt_chan_register);
 827
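/* A registration sketch using only the fields this file consumes;
 * mgmt.c does the equivalent for HCI_CHANNEL_CONTROL. MY_CHANNEL
 * (which must be at least HCI_CHANNEL_CONTROL, see the check above)
 * and my_cmd are hypothetical:
 *
 *	static const struct hci_mgmt_handler my_handlers[] = {
 *		{ .func = my_cmd, .data_len = 0, .flags = HCI_MGMT_NO_HDEV },
 *	};
 *
 *	static struct hci_mgmt_chan my_chan = {
 *		.channel	= MY_CHANNEL,
 *		.handler_count	= ARRAY_SIZE(my_handlers),
 *		.handlers	= my_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&my_chan);
 */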
 828void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 829{
 830	mutex_lock(&mgmt_chan_list_lock);
 831	list_del(&c->list);
 832	mutex_unlock(&mgmt_chan_list_lock);
 833}
 834EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 835
 836static int hci_sock_release(struct socket *sock)
 837{
 838	struct sock *sk = sock->sk;
 839	struct hci_dev *hdev;
 840	struct sk_buff *skb;
 841
 842	BT_DBG("sock %p sk %p", sock, sk);
 843
 844	if (!sk)
 845		return 0;
 846
 847	lock_sock(sk);
 848
 849	switch (hci_pi(sk)->channel) {
 850	case HCI_CHANNEL_MONITOR:
 851		atomic_dec(&monitor_promisc);
 852		break;
 853	case HCI_CHANNEL_RAW:
 854	case HCI_CHANNEL_USER:
 855	case HCI_CHANNEL_CONTROL:
 856		/* Send event to monitor */
 857		skb = create_monitor_ctrl_close(sk);
 858		if (skb) {
 859			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 860					    HCI_SOCK_TRUSTED, NULL);
 861			kfree_skb(skb);
 862		}
 863
 864		hci_sock_free_cookie(sk);
 865		break;
 866	}
 867
 868	bt_sock_unlink(&hci_sk_list, sk);
 869
 870	hdev = hci_pi(sk)->hdev;
 871	if (hdev) {
 872		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
 873		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
 874			/* When releasing a user channel exclusive access,
 875			 * call hci_dev_do_close directly instead of calling
 876			 * hci_dev_close to ensure the exclusive access will
 877			 * be released and the controller brought back down.
 878			 *
 879			 * The checking of HCI_AUTO_OFF is not needed in this
 880			 * case since it will have been cleared already when
 881			 * opening the user channel.
 882			 *
 883			 * Make sure to also check that we haven't already
 884			 * unregistered, since in that case all the cleanup
 885			 * will already have been completed and hdev will get
 886			 * released by the hci_dev_put() below.
 887			 */
 888			hci_dev_do_close(hdev);
 889			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 890			mgmt_index_added(hdev);
 891		}
 892
 893		atomic_dec(&hdev->promisc);
 894		hci_dev_put(hdev);
 895	}
 896
 897	sock_orphan(sk);
 898	release_sock(sk);
 899	sock_put(sk);
 900	return 0;
 901}
 902
 903static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
 904{
 905	bdaddr_t bdaddr;
 906	int err;
 907
 908	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 909		return -EFAULT;
 910
 911	hci_dev_lock(hdev);
 912
 913	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 914
 915	hci_dev_unlock(hdev);
 916
 917	return err;
 918}
 919
 920static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 921{
 922	bdaddr_t bdaddr;
 923	int err;
 924
 925	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 926		return -EFAULT;
 927
 928	hci_dev_lock(hdev);
 929
 930	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 931
 932	hci_dev_unlock(hdev);
 933
 934	return err;
 935}
 936
 937/* Ioctls that require a bound socket */
 938static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 939				unsigned long arg)
 940{
 941	struct hci_dev *hdev = hci_hdev_from_sock(sk);
 942
 943	if (IS_ERR(hdev))
 944		return PTR_ERR(hdev);
 945
 946	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 947		return -EBUSY;
 948
 949	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 950		return -EOPNOTSUPP;
 951
 952	if (hdev->dev_type != HCI_PRIMARY)
 953		return -EOPNOTSUPP;
 954
 955	switch (cmd) {
 956	case HCISETRAW:
 957		if (!capable(CAP_NET_ADMIN))
 958			return -EPERM;
 959		return -EOPNOTSUPP;
 960
 961	case HCIGETCONNINFO:
 962		return hci_get_conn_info(hdev, (void __user *)arg);
 963
 964	case HCIGETAUTHINFO:
 965		return hci_get_auth_info(hdev, (void __user *)arg);
 966
 967	case HCIBLOCKADDR:
 968		if (!capable(CAP_NET_ADMIN))
 969			return -EPERM;
 970		return hci_sock_reject_list_add(hdev, (void __user *)arg);
 971
 972	case HCIUNBLOCKADDR:
 973		if (!capable(CAP_NET_ADMIN))
 974			return -EPERM;
 975		return hci_sock_reject_list_del(hdev, (void __user *)arg);
 976	}
 977
 978	return -ENOIOCTLCMD;
 979}
 980
 981static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 982			  unsigned long arg)
 983{
 984	void __user *argp = (void __user *)arg;
 985	struct sock *sk = sock->sk;
 986	int err;
 987
 988	BT_DBG("cmd %x arg %lx", cmd, arg);
 989
 990	lock_sock(sk);
 991
 992	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 993		err = -EBADFD;
 994		goto done;
 995	}
 996
 997	/* When an ioctl is issued on an unbound raw socket, make sure
 998	 * that the monitor gets informed. Ensure that the resulting
 999	 * event is sent only once by checking whether the cookie
1000	 * already exists: the socket cookie is only ever generated
1001	 * once for the lifetime of a given socket.
1002	 */
1003	if (hci_sock_gen_cookie(sk)) {
1004		struct sk_buff *skb;
1005
1006		if (capable(CAP_NET_ADMIN))
1007			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1008
1009		/* Send event to monitor */
1010		skb = create_monitor_ctrl_open(sk);
1011		if (skb) {
1012			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1013					    HCI_SOCK_TRUSTED, NULL);
1014			kfree_skb(skb);
1015		}
1016	}
1017
1018	release_sock(sk);
1019
1020	switch (cmd) {
1021	case HCIGETDEVLIST:
1022		return hci_get_dev_list(argp);
1023
1024	case HCIGETDEVINFO:
1025		return hci_get_dev_info(argp);
1026
1027	case HCIGETCONNLIST:
1028		return hci_get_conn_list(argp);
1029
1030	case HCIDEVUP:
1031		if (!capable(CAP_NET_ADMIN))
1032			return -EPERM;
1033		return hci_dev_open(arg);
1034
1035	case HCIDEVDOWN:
1036		if (!capable(CAP_NET_ADMIN))
1037			return -EPERM;
1038		return hci_dev_close(arg);
1039
1040	case HCIDEVRESET:
1041		if (!capable(CAP_NET_ADMIN))
1042			return -EPERM;
1043		return hci_dev_reset(arg);
1044
1045	case HCIDEVRESTAT:
1046		if (!capable(CAP_NET_ADMIN))
1047			return -EPERM;
1048		return hci_dev_reset_stat(arg);
1049
1050	case HCISETSCAN:
1051	case HCISETAUTH:
1052	case HCISETENCRYPT:
1053	case HCISETPTYPE:
1054	case HCISETLINKPOL:
1055	case HCISETLINKMODE:
1056	case HCISETACLMTU:
1057	case HCISETSCOMTU:
1058		if (!capable(CAP_NET_ADMIN))
1059			return -EPERM;
1060		return hci_dev_cmd(cmd, argp);
1061
1062	case HCIINQUIRY:
1063		return hci_inquiry(argp);
1064	}
1065
1066	lock_sock(sk);
1067
1068	err = hci_sock_bound_ioctl(sk, cmd, arg);
1069
1070done:
1071	release_sock(sk);
1072	return err;
1073}
1074
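/* A userspace sketch of driving one of the ioctls handled above,
 * assuming BlueZ's <bluetooth/bluetooth.h> and <bluetooth/hci.h> for
 * the constants; HCIDEVUP takes the device id as its argument and
 * requires CAP_NET_ADMIN:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	if (ioctl(fd, HCIDEVUP, 0) < 0)
 *		perror("HCIDEVUP hci0");
 */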
1075#ifdef CONFIG_COMPAT
1076static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1077				 unsigned long arg)
1078{
1079	switch (cmd) {
1080	case HCIDEVUP:
1081	case HCIDEVDOWN:
1082	case HCIDEVRESET:
1083	case HCIDEVRESTAT:
1084		return hci_sock_ioctl(sock, cmd, arg);
1085	}
1086
1087	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1088}
1089#endif
1090
1091static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1092			 int addr_len)
1093{
1094	struct sockaddr_hci haddr;
1095	struct sock *sk = sock->sk;
1096	struct hci_dev *hdev = NULL;
1097	struct sk_buff *skb;
1098	int len, err = 0;
1099
1100	BT_DBG("sock %p sk %p", sock, sk);
1101
1102	if (!addr)
1103		return -EINVAL;
1104
1105	memset(&haddr, 0, sizeof(haddr));
1106	len = min_t(unsigned int, sizeof(haddr), addr_len);
1107	memcpy(&haddr, addr, len);
1108
1109	if (haddr.hci_family != AF_BLUETOOTH)
1110		return -EINVAL;
1111
1112	lock_sock(sk);
1113
1114	/* Allow detaching from a dead device and attaching to a live
1115	 * device, if the caller wants to re-bind (instead of close) this
1116	 * socket in response to hci_sock_dev_event(HCI_DEV_UNREG).
1117	 */
1118	hdev = hci_pi(sk)->hdev;
1119	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1120		hci_pi(sk)->hdev = NULL;
1121		sk->sk_state = BT_OPEN;
1122		hci_dev_put(hdev);
1123	}
1124	hdev = NULL;
1125
1126	if (sk->sk_state == BT_BOUND) {
1127		err = -EALREADY;
1128		goto done;
1129	}
1130
1131	switch (haddr.hci_channel) {
1132	case HCI_CHANNEL_RAW:
1133		if (hci_pi(sk)->hdev) {
1134			err = -EALREADY;
1135			goto done;
1136		}
1137
1138		if (haddr.hci_dev != HCI_DEV_NONE) {
1139			hdev = hci_dev_get(haddr.hci_dev);
1140			if (!hdev) {
1141				err = -ENODEV;
1142				goto done;
1143			}
1144
1145			atomic_inc(&hdev->promisc);
1146		}
1147
1148		hci_pi(sk)->channel = haddr.hci_channel;
1149
1150		if (!hci_sock_gen_cookie(sk)) {
1151			/* If a cookie has already been assigned, then an
1152			 * ioctl has previously been issued against the
1153			 * unbound socket and has triggered an open
1154			 * notification. Send a close notification first
1155			 * to allow a clean state transition to bound.
1156			 */
1157			skb = create_monitor_ctrl_close(sk);
1158			if (skb) {
1159				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1160						    HCI_SOCK_TRUSTED, NULL);
1161				kfree_skb(skb);
1162			}
1163		}
1164
1165		if (capable(CAP_NET_ADMIN))
1166			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1167
1168		hci_pi(sk)->hdev = hdev;
1169
1170		/* Send event to monitor */
1171		skb = create_monitor_ctrl_open(sk);
1172		if (skb) {
1173			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1174					    HCI_SOCK_TRUSTED, NULL);
1175			kfree_skb(skb);
1176		}
1177		break;
1178
1179	case HCI_CHANNEL_USER:
1180		if (hci_pi(sk)->hdev) {
1181			err = -EALREADY;
1182			goto done;
1183		}
1184
1185		if (haddr.hci_dev == HCI_DEV_NONE) {
1186			err = -EINVAL;
1187			goto done;
1188		}
1189
1190		if (!capable(CAP_NET_ADMIN)) {
1191			err = -EPERM;
1192			goto done;
1193		}
1194
1195		hdev = hci_dev_get(haddr.hci_dev);
1196		if (!hdev) {
1197			err = -ENODEV;
1198			goto done;
1199		}
1200
1201		if (test_bit(HCI_INIT, &hdev->flags) ||
1202		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1203		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1204		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1205		     test_bit(HCI_UP, &hdev->flags))) {
1206			err = -EBUSY;
1207			hci_dev_put(hdev);
1208			goto done;
1209		}
1210
1211		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1212			err = -EUSERS;
1213			hci_dev_put(hdev);
1214			goto done;
1215		}
1216
1217		mgmt_index_removed(hdev);
1218
1219		err = hci_dev_open(hdev->id);
1220		if (err) {
1221			if (err == -EALREADY) {
1222				/* In case the transport is already up and
1223				 * running, clear the error here.
1224				 *
1225				 * This can happen when opening a user
1226				 * channel and HCI_AUTO_OFF grace period
1227				 * is still active.
1228				 */
1229				err = 0;
1230			} else {
1231				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1232				mgmt_index_added(hdev);
1233				hci_dev_put(hdev);
1234				goto done;
1235			}
1236		}
1237
1238		hci_pi(sk)->channel = haddr.hci_channel;
1239
1240		if (!hci_sock_gen_cookie(sk)) {
1241			/* If a cookie has already been assigned, this
1242			 * socket will transition from a raw socket into
1243			 * a user channel socket. For a clean transition,
1244			 * send the close notification first.
1245			 */
1246			skb = create_monitor_ctrl_close(sk);
1247			if (skb) {
1248				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1249						    HCI_SOCK_TRUSTED, NULL);
1250				kfree_skb(skb);
1251			}
1252		}
1253
1254		/* The user channel is restricted to CAP_NET_ADMIN
1255		 * capabilities and is therefore implicitly trusted.
1256		 */
1257		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1258
1259		hci_pi(sk)->hdev = hdev;
1260
1261		/* Send event to monitor */
1262		skb = create_monitor_ctrl_open(sk);
1263		if (skb) {
1264			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1265					    HCI_SOCK_TRUSTED, NULL);
1266			kfree_skb(skb);
1267		}
1268
1269		atomic_inc(&hdev->promisc);
1270		break;
1271
1272	case HCI_CHANNEL_MONITOR:
1273		if (haddr.hci_dev != HCI_DEV_NONE) {
1274			err = -EINVAL;
1275			goto done;
1276		}
1277
1278		if (!capable(CAP_NET_RAW)) {
1279			err = -EPERM;
1280			goto done;
1281		}
1282
1283		hci_pi(sk)->channel = haddr.hci_channel;
1284
1285		/* The monitor interface is restricted to CAP_NET_RAW
1286		 * capabilities and is therefore implicitly trusted.
1287		 */
1288		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1289
1290		send_monitor_note(sk, "Linux version %s (%s)",
1291				  init_utsname()->release,
1292				  init_utsname()->machine);
1293		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1294				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1295		send_monitor_replay(sk);
1296		send_monitor_control_replay(sk);
1297
1298		atomic_inc(&monitor_promisc);
1299		break;
1300
1301	case HCI_CHANNEL_LOGGING:
1302		if (haddr.hci_dev != HCI_DEV_NONE) {
1303			err = -EINVAL;
1304			goto done;
1305		}
1306
1307		if (!capable(CAP_NET_ADMIN)) {
1308			err = -EPERM;
1309			goto done;
1310		}
1311
1312		hci_pi(sk)->channel = haddr.hci_channel;
1313		break;
1314
1315	default:
1316		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1317			err = -EINVAL;
1318			goto done;
1319		}
1320
1321		if (haddr.hci_dev != HCI_DEV_NONE) {
1322			err = -EINVAL;
1323			goto done;
1324		}
1325
1326		/* Users with CAP_NET_ADMIN capabilities are allowed
1327		 * access to all management commands and events. For
1328		 * untrusted users the interface is restricted and
1329		 * only events marked safe for untrusted users are sent.
1330		 */
1331		if (capable(CAP_NET_ADMIN))
1332			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1333
1334		hci_pi(sk)->channel = haddr.hci_channel;
1335
1336		/* At the moment the index and unconfigured index events
1337		 * are enabled unconditionally. Setting them on each
1338		 * socket when binding keeps this behaviour. They may,
1339		 * however, be cleared later, in which case sending of
1340		 * these events is disabled, but that is then intentional.
1341		 *
1342		 * This also enables generic events that are safe to be
1343		 * received by untrusted users. Example for such events
1344		 * are changes to settings, class of device, name etc.
1345		 */
1346		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1347			if (!hci_sock_gen_cookie(sk)) {
1348				/* If a cookie has already been assigned,
1349				 * this socket will transition from a raw
1350				 * socket into a control socket. To allow
1351				 * for a clean transition, send the close
1352				 * notification first.
1353				 */
1354				skb = create_monitor_ctrl_close(sk);
1355				if (skb) {
1356					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1357							    HCI_SOCK_TRUSTED, NULL);
1358					kfree_skb(skb);
1359				}
1360			}
1361
1362			/* Send event to monitor */
1363			skb = create_monitor_ctrl_open(sk);
1364			if (skb) {
1365				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1366						    HCI_SOCK_TRUSTED, NULL);
1367				kfree_skb(skb);
1368			}
1369
1370			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1371			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1372			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1373			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1374			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1375			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1376		}
1377		break;
1378	}
1379
1380	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
1381	if (!hci_pi(sk)->mtu)
1382		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
1383
1384	sk->sk_state = BT_BOUND;
1385
1386done:
1387	release_sock(sk);
1388	return err;
1389}
1390
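/* A minimal monitor-channel client sketch (roughly what btmon does at
 * startup), again assuming the BlueZ userspace headers; binding the
 * monitor channel requires CAP_NET_RAW and hci_dev must be
 * HCI_DEV_NONE, as enforced above:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind monitor channel");
 */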
1391static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1392			    int peer)
1393{
1394	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1395	struct sock *sk = sock->sk;
1396	struct hci_dev *hdev;
1397	int err = 0;
1398
1399	BT_DBG("sock %p sk %p", sock, sk);
1400
1401	if (peer)
1402		return -EOPNOTSUPP;
1403
1404	lock_sock(sk);
1405
1406	hdev = hci_hdev_from_sock(sk);
1407	if (IS_ERR(hdev)) {
1408		err = PTR_ERR(hdev);
1409		goto done;
1410	}
1411
1412	haddr->hci_family = AF_BLUETOOTH;
1413	haddr->hci_dev    = hdev->id;
1414	haddr->hci_channel = hci_pi(sk)->channel;
1415	err = sizeof(*haddr);
1416
1417done:
1418	release_sock(sk);
1419	return err;
1420}
1421
1422static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1423			  struct sk_buff *skb)
1424{
1425	__u8 mask = hci_pi(sk)->cmsg_mask;
1426
1427	if (mask & HCI_CMSG_DIR) {
1428		int incoming = bt_cb(skb)->incoming;
1429		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1430			 &incoming);
1431	}
1432
1433	if (mask & HCI_CMSG_TSTAMP) {
1434#ifdef CONFIG_COMPAT
1435		struct old_timeval32 ctv;
1436#endif
1437		struct __kernel_old_timeval tv;
1438		void *data;
1439		int len;
1440
1441		skb_get_timestamp(skb, &tv);
1442
1443		data = &tv;
1444		len = sizeof(tv);
1445#ifdef CONFIG_COMPAT
1446		if (!COMPAT_USE_64BIT_TIME &&
1447		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1448			ctv.tv_sec = tv.tv_sec;
1449			ctv.tv_usec = tv.tv_usec;
1450			data = &ctv;
1451			len = sizeof(ctv);
1452		}
1453#endif
1454
1455		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1456	}
1457}
1458
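/* Consuming the ancillary data delivered above from userspace (sketch;
 * iov and incoming are assumed to be set up by the caller, and the
 * options are enabled via the HCI_DATA_DIR/HCI_TIME_STAMP socket
 * options shown further below):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct timeval)) +
 *		  CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_HCI && c->cmsg_type == HCI_CMSG_DIR)
 *			memcpy(&incoming, CMSG_DATA(c), sizeof(int));
 */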
1459static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1460			    size_t len, int flags)
1461{
1462	struct sock *sk = sock->sk;
1463	struct sk_buff *skb;
1464	int copied, err;
1465	unsigned int skblen;
1466
1467	BT_DBG("sock %p, sk %p", sock, sk);
1468
1469	if (flags & MSG_OOB)
1470		return -EOPNOTSUPP;
1471
1472	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1473		return -EOPNOTSUPP;
1474
1475	if (sk->sk_state == BT_CLOSED)
1476		return 0;
1477
1478	skb = skb_recv_datagram(sk, flags, &err);
1479	if (!skb)
1480		return err;
1481
1482	skblen = skb->len;
1483	copied = skb->len;
1484	if (len < copied) {
1485		msg->msg_flags |= MSG_TRUNC;
1486		copied = len;
1487	}
1488
1489	skb_reset_transport_header(skb);
1490	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1491
1492	switch (hci_pi(sk)->channel) {
1493	case HCI_CHANNEL_RAW:
1494		hci_sock_cmsg(sk, msg, skb);
1495		break;
1496	case HCI_CHANNEL_USER:
1497	case HCI_CHANNEL_MONITOR:
1498		sock_recv_timestamp(msg, sk, skb);
1499		break;
1500	default:
1501		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1502			sock_recv_timestamp(msg, sk, skb);
1503		break;
1504	}
1505
1506	skb_free_datagram(sk, skb);
1507
1508	if (flags & MSG_TRUNC)
1509		copied = skblen;
1510
1511	return err ? : copied;
1512}
1513
1514static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1515			struct sk_buff *skb)
1516{
1517	u8 *cp;
1518	struct mgmt_hdr *hdr;
1519	u16 opcode, index, len;
1520	struct hci_dev *hdev = NULL;
1521	const struct hci_mgmt_handler *handler;
1522	bool var_len, no_hdev;
1523	int err;
1524
1525	BT_DBG("got %d bytes", skb->len);
1526
1527	if (skb->len < sizeof(*hdr))
1528		return -EINVAL;
1529
1530	hdr = (void *)skb->data;
1531	opcode = __le16_to_cpu(hdr->opcode);
1532	index = __le16_to_cpu(hdr->index);
1533	len = __le16_to_cpu(hdr->len);
1534
1535	if (len != skb->len - sizeof(*hdr)) {
1536		err = -EINVAL;
1537		goto done;
1538	}
1539
1540	if (chan->channel == HCI_CHANNEL_CONTROL) {
1541		struct sk_buff *cmd;
1542
1543		/* Send event to monitor */
1544		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
1545						  skb->data + sizeof(*hdr));
1546		if (cmd) {
1547			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
1548					    HCI_SOCK_TRUSTED, NULL);
1549			kfree_skb(cmd);
1550		}
1551	}
1552
1553	if (opcode >= chan->handler_count ||
1554	    chan->handlers[opcode].func == NULL) {
1555		BT_DBG("Unknown op %u", opcode);
1556		err = mgmt_cmd_status(sk, index, opcode,
1557				      MGMT_STATUS_UNKNOWN_COMMAND);
1558		goto done;
1559	}
1560
1561	handler = &chan->handlers[opcode];
1562
1563	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1564	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1565		err = mgmt_cmd_status(sk, index, opcode,
1566				      MGMT_STATUS_PERMISSION_DENIED);
1567		goto done;
1568	}
1569
1570	if (index != MGMT_INDEX_NONE) {
1571		hdev = hci_dev_get(index);
1572		if (!hdev) {
1573			err = mgmt_cmd_status(sk, index, opcode,
1574					      MGMT_STATUS_INVALID_INDEX);
1575			goto done;
1576		}
1577
1578		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1579		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1580		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1581			err = mgmt_cmd_status(sk, index, opcode,
1582					      MGMT_STATUS_INVALID_INDEX);
1583			goto done;
1584		}
1585
1586		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1587		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1588			err = mgmt_cmd_status(sk, index, opcode,
1589					      MGMT_STATUS_INVALID_INDEX);
1590			goto done;
1591		}
1592	}
1593
1594	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1595		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1596		if (no_hdev != !hdev) {
1597			err = mgmt_cmd_status(sk, index, opcode,
1598					      MGMT_STATUS_INVALID_INDEX);
1599			goto done;
1600		}
1601	}
1602
1603	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1604	if ((var_len && len < handler->data_len) ||
1605	    (!var_len && len != handler->data_len)) {
1606		err = mgmt_cmd_status(sk, index, opcode,
1607				      MGMT_STATUS_INVALID_PARAMS);
1608		goto done;
1609	}
1610
1611	if (hdev && chan->hdev_init)
1612		chan->hdev_init(sk, hdev);
1613
1614	cp = skb->data + sizeof(*hdr);
1615
1616	err = handler->func(sk, hdev, cp, len);
1617	if (err < 0)
1618		goto done;
1619
1620	err = skb->len;
1621
1622done:
1623	if (hdev)
1624		hci_dev_put(hdev);
1625
1626	return err;
1627}
1628
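/* Issuing a management command from userspace over a control-channel
 * socket (sketch): the header is three little-endian u16 values; shown
 * here with Read Management Version Information (opcode 0x0001), no
 * parameters and no controller index, using BlueZ's htobs() for the
 * byte order:
 *
 *	struct {
 *		uint16_t opcode, index, len;
 *	} __attribute__ ((packed)) hdr = {
 *		htobs(0x0001), htobs(0xffff), htobs(0)
 *	};
 *
 *	write(fd, &hdr, sizeof(hdr));
 */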
1629static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
1630			     unsigned int flags)
1631{
1632	struct hci_mon_hdr *hdr;
1633	struct hci_dev *hdev;
1634	u16 index;
1635	int err;
1636
1637	/* The logging frame consists, at a minimum, of the standard header,
1638	 * the priority byte, the ident length byte and at least one string
1639	 * terminator NUL byte. Anything shorter is an invalid packet.
1640	 */
1641	if (skb->len < sizeof(*hdr) + 3)
1642		return -EINVAL;
1643
1644	hdr = (void *)skb->data;
1645
1646	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
1647		return -EINVAL;
1648
1649	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1650		__u8 priority = skb->data[sizeof(*hdr)];
1651		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1652
1653		/* Only the priorities 0-7 are valid and with that any other
1654		 * value results in an invalid packet.
1655		 *
1656		 * The priority byte is followed by an ident length byte and
1657		 * the NUL terminated ident string. Check that the ident
1658		 * length is not overflowing the packet and also that the
1659		 * ident string itself is NUL terminated. In case the ident
1660		 * length is zero, the zero ident length byte itself doubles
1661		 * as the NUL terminator.
1662		 *
1663		 * The message follows the ident string (if present) and
1664		 * must be NUL terminated. Otherwise it is not a valid packet.
1665		 */
1666		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
1667		    ident_len > skb->len - sizeof(*hdr) - 3 ||
1668		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
1669			return -EINVAL;
1670	} else {
1671		return -EINVAL;
1672	}
1673
1674	index = __le16_to_cpu(hdr->index);
1675
1676	if (index != MGMT_INDEX_NONE) {
1677		hdev = hci_dev_get(index);
1678		if (!hdev)
1679			return -ENODEV;
1680	} else {
1681		hdev = NULL;
1682	}
1683
1684	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1685
1686	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1687	err = skb->len;
1688
1689	if (hdev)
1690		hci_dev_put(hdev);
1691
1692	return err;
1693}
1694
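/* Byte layout of a valid logging frame, as enforced by the checks
 * above (opcode 0x0000 is rewritten to HCI_MON_USER_LOGGING before
 * forwarding):
 *
 *	struct hci_mon_hdr	opcode, index, payload length (LE)
 *	priority		1 octet, 0-7
 *	ident_len		1 octet, counting the ident's NUL
 *	ident			ident_len octets, NUL terminated
 *	message			NUL terminated string
 */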
1695static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1696			    size_t len)
1697{
1698	struct sock *sk = sock->sk;
1699	struct hci_mgmt_chan *chan;
1700	struct hci_dev *hdev;
1701	struct sk_buff *skb;
1702	int err;
1703	const unsigned int flags = msg->msg_flags;
1704
1705	BT_DBG("sock %p sk %p", sock, sk);
1706
1707	if (flags & MSG_OOB)
1708		return -EOPNOTSUPP;
1709
1710	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
1711		return -EINVAL;
1712
1713	if (len < 4 || len > hci_pi(sk)->mtu)
1714		return -EINVAL;
1715
1716	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
1717	if (IS_ERR(skb))
1718		return PTR_ERR(skb);
1719
1720	lock_sock(sk);
1721
1722	switch (hci_pi(sk)->channel) {
1723	case HCI_CHANNEL_RAW:
1724	case HCI_CHANNEL_USER:
1725		break;
1726	case HCI_CHANNEL_MONITOR:
1727		err = -EOPNOTSUPP;
1728		goto drop;
1729	case HCI_CHANNEL_LOGGING:
1730		err = hci_logging_frame(sk, skb, flags);
1731		goto drop;
1732	default:
1733		mutex_lock(&mgmt_chan_list_lock);
1734		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1735		if (chan)
1736			err = hci_mgmt_cmd(chan, sk, skb);
1737		else
1738			err = -EINVAL;
1739
1740		mutex_unlock(&mgmt_chan_list_lock);
1741		goto drop;
1742	}
1743
1744	hdev = hci_hdev_from_sock(sk);
1745	if (IS_ERR(hdev)) {
1746		err = PTR_ERR(hdev);
1747		goto drop;
1748	}
1749
1750	if (!test_bit(HCI_UP, &hdev->flags)) {
1751		err = -ENETDOWN;
1752		goto drop;
1753	}
1754
1755	hci_skb_pkt_type(skb) = skb->data[0];
1756	skb_pull(skb, 1);
1757
1758	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1759		/* No permission check is needed for the user channel,
1760		 * since that gets enforced when binding the socket.
1761		 *
1762		 * However, check that the packet type is valid.
1763		 */
1764		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1765		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1766		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1767		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1768			err = -EINVAL;
1769			goto drop;
1770		}
1771
1772		skb_queue_tail(&hdev->raw_q, skb);
1773		queue_work(hdev->workqueue, &hdev->tx_work);
1774	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1775		u16 opcode = get_unaligned_le16(skb->data);
1776		u16 ogf = hci_opcode_ogf(opcode);
1777		u16 ocf = hci_opcode_ocf(opcode);
1778
1779		if (((ogf > HCI_SFLT_MAX_OGF) ||
1780		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1781				   &hci_sec_filter.ocf_mask[ogf])) &&
1782		    !capable(CAP_NET_RAW)) {
1783			err = -EPERM;
1784			goto drop;
1785		}
1786
1787		/* Since the opcode has already been extracted here, store
1788		 * a copy of the value for later use by the drivers.
1789		 */
1790		hci_skb_opcode(skb) = opcode;
1791
1792		if (ogf == 0x3f) {
1793			skb_queue_tail(&hdev->raw_q, skb);
1794			queue_work(hdev->workqueue, &hdev->tx_work);
1795		} else {
1796			/* Stand-alone HCI commands must be flagged as
1797			 * single-command requests.
1798			 */
1799			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1800
1801			skb_queue_tail(&hdev->cmd_q, skb);
1802			queue_work(hdev->workqueue, &hdev->cmd_work);
1803		}
1804	} else {
1805		if (!capable(CAP_NET_RAW)) {
1806			err = -EPERM;
1807			goto drop;
1808		}
1809
1810		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1811		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1812		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1813			err = -EINVAL;
1814			goto drop;
1815		}
1816
1817		skb_queue_tail(&hdev->raw_q, skb);
1818		queue_work(hdev->workqueue, &hdev->tx_work);
1819	}
1820
1821	err = len;
1822
1823done:
1824	release_sock(sk);
1825	return err;
1826
1827drop:
1828	kfree_skb(skb);
1829	goto done;
1830}
1831
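/* Sending a stand-alone HCI command over the raw channel from userspace
 * (sketch): one packet type octet, the little-endian opcode and the
 * parameter length. HCI_Reset (OGF 0x03, OCF 0x0003) is shown; it is
 * not covered by the security filter above, so the caller needs
 * CAP_NET_RAW:
 *
 *	uint8_t pkt[] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *	write(fd, pkt, sizeof(pkt));
 */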
1832static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1833				   sockptr_t optval, unsigned int len)
1834{
1835	struct hci_ufilter uf = { .opcode = 0 };
1836	struct sock *sk = sock->sk;
1837	int err = 0, opt = 0;
1838
1839	BT_DBG("sk %p, opt %d", sk, optname);
1840
1841	lock_sock(sk);
1842
1843	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1844		err = -EBADFD;
1845		goto done;
1846	}
1847
1848	switch (optname) {
1849	case HCI_DATA_DIR:
1850		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1851			err = -EFAULT;
1852			break;
1853		}
1854
1855		if (opt)
1856			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1857		else
1858			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1859		break;
1860
1861	case HCI_TIME_STAMP:
1862		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1863			err = -EFAULT;
1864			break;
1865		}
1866
1867		if (opt)
1868			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1869		else
1870			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1871		break;
1872
1873	case HCI_FILTER:
1874		{
1875			struct hci_filter *f = &hci_pi(sk)->filter;
1876
1877			uf.type_mask = f->type_mask;
1878			uf.opcode    = f->opcode;
1879			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1880			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1881		}
1882
1883		len = min_t(unsigned int, len, sizeof(uf));
1884		if (copy_from_sockptr(&uf, optval, len)) {
1885			err = -EFAULT;
1886			break;
1887		}
1888
1889		if (!capable(CAP_NET_RAW)) {
1890			uf.type_mask &= hci_sec_filter.type_mask;
1891			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1892			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1893		}
1894
1895		{
1896			struct hci_filter *f = &hci_pi(sk)->filter;
1897
1898			f->type_mask = uf.type_mask;
1899			f->opcode    = uf.opcode;
1900			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1901			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1902		}
1903		break;
1904
1905	default:
1906		err = -ENOPROTOOPT;
1907		break;
1908	}
1909
1910done:
1911	release_sock(sk);
1912	return err;
1913}
1914
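/* Enabling the per-packet direction and timestamp control messages from
 * userspace (sketch; these set the cmsg_mask consumed by
 * hci_sock_cmsg() above):
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &one, sizeof(one));
 *	setsockopt(fd, SOL_HCI, HCI_TIME_STAMP, &one, sizeof(one));
 */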
1915static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1916			       sockptr_t optval, unsigned int len)
1917{
1918	struct sock *sk = sock->sk;
1919	int err = 0;
1920	u16 opt;
1921
1922	BT_DBG("sk %p, opt %d", sk, optname);
1923
1924	if (level == SOL_HCI)
1925		return hci_sock_setsockopt_old(sock, level, optname, optval,
1926					       len);
1927
1928	if (level != SOL_BLUETOOTH)
1929		return -ENOPROTOOPT;
1930
1931	lock_sock(sk);
1932
1933	switch (optname) {
1934	case BT_SNDMTU:
1935	case BT_RCVMTU:
1936		switch (hci_pi(sk)->channel) {
1937		/* Don't allow changing MTU for channels that are meant for HCI
1938		 * traffic only.
1939		 */
1940		case HCI_CHANNEL_RAW:
1941		case HCI_CHANNEL_USER:
1942			err = -ENOPROTOOPT;
1943			goto done;
1944		}
1945
1946		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1947			err = -EFAULT;
1948			break;
1949		}
1950
1951		hci_pi(sk)->mtu = opt;
1952		break;
1953
1954	default:
1955		err = -ENOPROTOOPT;
1956		break;
1957	}
1958
1959done:
1960	release_sock(sk);
1961	return err;
1962}
1963
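/* Raising the MTU on a control or monitor channel socket (sketch; raw
 * and user channel sockets reject this, as handled above, and the value
 * is read as a u16):
 *
 *	uint16_t mtu = 1024;
 *
 *	setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
 */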
1964static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
1965				   char __user *optval, int __user *optlen)
1966{
1967	struct hci_ufilter uf;
1968	struct sock *sk = sock->sk;
1969	int len, opt, err = 0;
1970
1971	BT_DBG("sk %p, opt %d", sk, optname);
1972
1973	if (get_user(len, optlen))
1974		return -EFAULT;
1975
1976	lock_sock(sk);
1977
1978	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1979		err = -EBADFD;
1980		goto done;
1981	}
1982
1983	switch (optname) {
1984	case HCI_DATA_DIR:
1985		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1986			opt = 1;
1987		else
1988			opt = 0;
1989
1990		if (put_user(opt, optval))
1991			err = -EFAULT;
1992		break;
1993
1994	case HCI_TIME_STAMP:
1995		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1996			opt = 1;
1997		else
1998			opt = 0;
1999
2000		if (put_user(opt, optval))
2001			err = -EFAULT;
2002		break;
2003
2004	case HCI_FILTER:
2005		{
2006			struct hci_filter *f = &hci_pi(sk)->filter;
2007
2008			memset(&uf, 0, sizeof(uf));
2009			uf.type_mask = f->type_mask;
2010			uf.opcode    = f->opcode;
2011			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2012			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2013		}
2014
2015		len = min_t(unsigned int, len, sizeof(uf));
2016		if (copy_to_user(optval, &uf, len))
2017			err = -EFAULT;
2018		break;
2019
2020	default:
2021		err = -ENOPROTOOPT;
2022		break;
2023	}
2024
2025done:
2026	release_sock(sk);
2027	return err;
2028}
2029
2030static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2031			       char __user *optval, int __user *optlen)
2032{
2033	struct sock *sk = sock->sk;
2034	int err = 0;
2035
2036	BT_DBG("sk %p, opt %d", sk, optname);
2037
2038	if (level == SOL_HCI)
2039		return hci_sock_getsockopt_old(sock, level, optname, optval,
2040					       optlen);
2041
2042	if (level != SOL_BLUETOOTH)
2043		return -ENOPROTOOPT;
2044
2045	lock_sock(sk);
2046
2047	switch (optname) {
2048	case BT_SNDMTU:
2049	case BT_RCVMTU:
2050		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2051			err = -EFAULT;
2052		break;
2053
2054	default:
2055		err = -ENOPROTOOPT;
2056		break;
2057	}
2058
2059	release_sock(sk);
2060	return err;
2061}
2062
2063static void hci_sock_destruct(struct sock *sk)
2064{
2065	mgmt_cleanup(sk);
2066	skb_queue_purge(&sk->sk_receive_queue);
2067	skb_queue_purge(&sk->sk_write_queue);
2068}
2069
2070static const struct proto_ops hci_sock_ops = {
2071	.family		= PF_BLUETOOTH,
2072	.owner		= THIS_MODULE,
2073	.release	= hci_sock_release,
2074	.bind		= hci_sock_bind,
2075	.getname	= hci_sock_getname,
2076	.sendmsg	= hci_sock_sendmsg,
2077	.recvmsg	= hci_sock_recvmsg,
2078	.ioctl		= hci_sock_ioctl,
2079#ifdef CONFIG_COMPAT
2080	.compat_ioctl	= hci_sock_compat_ioctl,
2081#endif
2082	.poll		= datagram_poll,
2083	.listen		= sock_no_listen,
2084	.shutdown	= sock_no_shutdown,
2085	.setsockopt	= hci_sock_setsockopt,
2086	.getsockopt	= hci_sock_getsockopt,
2087	.connect	= sock_no_connect,
2088	.socketpair	= sock_no_socketpair,
2089	.accept		= sock_no_accept,
2090	.mmap		= sock_no_mmap
2091};
2092
2093static struct proto hci_sk_proto = {
2094	.name		= "HCI",
2095	.owner		= THIS_MODULE,
2096	.obj_size	= sizeof(struct hci_pinfo)
2097};
2098
2099static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2100			   int kern)
2101{
2102	struct sock *sk;
2103
2104	BT_DBG("sock %p", sock);
2105
2106	if (sock->type != SOCK_RAW)
2107		return -ESOCKTNOSUPPORT;
2108
2109	sock->ops = &hci_sock_ops;
2110
2111	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2112	if (!sk)
2113		return -ENOMEM;
2114
2115	sock_init_data(sock, sk);
2116
2117	sock_reset_flag(sk, SOCK_ZAPPED);
2118
2119	sk->sk_protocol = protocol;
2120
2121	sock->state = SS_UNCONNECTED;
2122	sk->sk_state = BT_OPEN;
2123	sk->sk_destruct = hci_sock_destruct;
2124
2125	bt_sock_link(&hci_sk_list, sk);
2126	return 0;
2127}
2128
2129static const struct net_proto_family hci_sock_family_ops = {
2130	.family	= PF_BLUETOOTH,
2131	.owner	= THIS_MODULE,
2132	.create	= hci_sock_create,
2133};
2134
2135int __init hci_sock_init(void)
2136{
2137	int err;
2138
2139	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2140
2141	err = proto_register(&hci_sk_proto, 0);
2142	if (err < 0)
2143		return err;
2144
2145	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2146	if (err < 0) {
2147		BT_ERR("HCI socket registration failed");
2148		goto error;
2149	}
2150
2151	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2152	if (err < 0) {
2153		BT_ERR("Failed to create HCI proc file");
2154		bt_sock_unregister(BTPROTO_HCI);
2155		goto error;
2156	}
2157
2158	BT_INFO("HCI socket layer initialized");
2159
2160	return 0;
2161
2162error:
2163	proto_unregister(&hci_sk_proto);
2164	return err;
2165}
2166
2167void hci_sock_cleanup(void)
2168{
2169	bt_procfs_cleanup(&init_net, "hci");
2170	bt_sock_unregister(BTPROTO_HCI);
2171	proto_unregister(&hci_sk_proto);
2172}