/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
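
/* A minimal user-space sketch of reaching this socket layer, for reference.
 * It assumes the usual UAPI definitions (AF_BLUETOOTH, BTPROTO_HCI,
 * struct sockaddr_hci, HCI_CHANNEL_RAW) from the Bluetooth headers and is
 * illustrative only, not part of this file:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	if (fd < 0 || bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		;				// no adapter, no permission, ...
 */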

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
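
/* How the masks above are indexed, matching hci_test_bit() above: bit nr
 * lives in word nr >> 5 at position nr & 31. For example, event code 0x0e
 * (Command Complete) is bit 14 of event_mask[0], and command OCF 0x0003
 * (Reset, OGF_HOST_CTL) is bit 3 of ocf_mask[3][0]. A set bit means sockets
 * without CAP_NET_RAW may see that event or send that command.
 */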

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
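
/* A hedged sketch of installing such a filter from user space; the layout
 * mirrors struct hci_ufilter (type_mask, event_mask[2], opcode) and the
 * SOL_HCI/HCI_FILTER constants come from the HCI socket headers:
 *
 *	struct hci_ufilter flt = {
 *		.type_mask     = 1 << HCI_EVENT_PKT,
 *		.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE,
 *	};
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */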

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
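
/* Every frame queued to a monitor socket therefore starts with the six-byte
 * little-endian struct hci_mon_hdr:
 *
 *	[opcode:le16][index:le16][len:le16][len bytes of payload]
 *
 * which is the stream format that tools such as btmon decode.
 */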

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %s",
				  BT_SUBSYS_VERSION);
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
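
/* Every management command parsed above starts with struct mgmt_hdr, all
 * fields little-endian:
 *
 *	[opcode:le16][index:le16][len:le16][len bytes of parameters]
 *
 * so a command without parameters is exactly six bytes on the wire.
 */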

static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
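
/* What a write to a RAW channel socket carries: the first byte is the H:4
 * packet type indicator, the rest is the HCI packet itself. A hedged
 * user-space example sending HCI_Reset (OGF 0x03, OCF 0x0003, opcode 0x0c03
 * little-endian, no parameters); illustrative only:
 *
 *	static const uint8_t reset[] = { 0x01, 0x03, 0x0c, 0x00 };
 *	// type byte, opcode LE16, parameter length
 *	write(fd, reset, sizeof(reset));
 *
 * Note that without CAP_NET_RAW this particular command is rejected with
 * -EPERM, since Reset is not set in hci_sec_filter.ocf_mask above.
 */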

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
v3.15
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
 
  28#include <asm/unaligned.h>
  29
  30#include <net/bluetooth/bluetooth.h>
  31#include <net/bluetooth/hci_core.h>
  32#include <net/bluetooth/hci_mon.h>
 
 
 
 
 
 
  33
  34static atomic_t monitor_promisc = ATOMIC_INIT(0);
  35
  36/* ----- HCI socket interface ----- */
  37
  38static inline int hci_test_bit(int nr, void *addr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  39{
  40	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 
 
 
 
 
 
 
 
 
 
  41}
  42
  43/* Security filter */
  44static struct hci_sec_filter hci_sec_filter = {
 
 
 
 
 
 
 
 
  45	/* Packet types */
  46	0x10,
  47	/* Events */
  48	{ 0x1000d9fe, 0x0000b00c },
  49	/* Commands */
  50	{
  51		{ 0x0 },
  52		/* OGF_LINK_CTL */
  53		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
  54		/* OGF_LINK_POLICY */
  55		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
  56		/* OGF_HOST_CTL */
  57		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
  58		/* OGF_INFO_PARAM */
  59		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
  60		/* OGF_STATUS_PARAM */
  61		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
  62	}
  63};
  64
  65static struct bt_sock_list hci_sk_list = {
  66	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
  67};
  68
  69static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
  70{
  71	struct hci_filter *flt;
  72	int flt_type, flt_event;
  73
  74	/* Apply filter */
  75	flt = &hci_pi(sk)->filter;
  76
  77	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
  78		flt_type = 0;
  79	else
  80		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
  81
  82	if (!test_bit(flt_type, &flt->type_mask))
  83		return true;
  84
  85	/* Extra filter for event packets only */
  86	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
  87		return false;
  88
  89	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
  90
  91	if (!hci_test_bit(flt_event, &flt->event_mask))
  92		return true;
  93
  94	/* Check filter only when opcode is set */
  95	if (!flt->opcode)
  96		return false;
  97
  98	if (flt_event == HCI_EV_CMD_COMPLETE &&
  99	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 100		return true;
 101
 102	if (flt_event == HCI_EV_CMD_STATUS &&
 103	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 104		return true;
 105
 106	return false;
 107}
 108
 109/* Send frame to RAW socket */
 110void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 111{
 112	struct sock *sk;
 113	struct sk_buff *skb_copy = NULL;
 114
 115	BT_DBG("hdev %p len %d", hdev, skb->len);
 116
 117	read_lock(&hci_sk_list.lock);
 118
 119	sk_for_each(sk, &hci_sk_list.head) {
 120		struct sk_buff *nskb;
 121
 122		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 123			continue;
 124
 125		/* Don't send frame to the socket it came from */
 126		if (skb->sk == sk)
 127			continue;
 128
 129		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 
 
 
 
 
 130			if (is_filtered_packet(sk, skb))
 131				continue;
 132		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 133			if (!bt_cb(skb)->incoming)
 134				continue;
 135			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
 136			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
 137			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
 138				continue;
 139		} else {
 140			/* Don't send frame to other channel types */
 141			continue;
 142		}
 143
 144		if (!skb_copy) {
 145			/* Create a private copy with headroom */
 146			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
 147			if (!skb_copy)
 148				continue;
 149
 150			/* Put type byte before the data */
 151			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
 152		}
 153
 154		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 155		if (!nskb)
 156			continue;
 157
 158		if (sock_queue_rcv_skb(sk, nskb))
 159			kfree_skb(nskb);
 160	}
 161
 162	read_unlock(&hci_sk_list.lock);
 163
 164	kfree_skb(skb_copy);
 165}
 166
 167/* Send frame to control socket */
 168void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
 
 169{
 170	struct sock *sk;
 171
 172	BT_DBG("len %d", skb->len);
 173
 174	read_lock(&hci_sk_list.lock);
 175
 176	sk_for_each(sk, &hci_sk_list.head) {
 177		struct sk_buff *nskb;
 178
 
 
 
 
 179		/* Skip the original socket */
 180		if (sk == skip_sk)
 181			continue;
 182
 183		if (sk->sk_state != BT_BOUND)
 184			continue;
 185
 186		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 187			continue;
 188
 189		nskb = skb_clone(skb, GFP_ATOMIC);
 190		if (!nskb)
 191			continue;
 192
 193		if (sock_queue_rcv_skb(sk, nskb))
 194			kfree_skb(nskb);
 195	}
 196
 197	read_unlock(&hci_sk_list.lock);
 198}
 199
 200/* Send frame to monitor socket */
 201void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 202{
 203	struct sock *sk;
 204	struct sk_buff *skb_copy = NULL;
 
 205	__le16 opcode;
 206
 207	if (!atomic_read(&monitor_promisc))
 208		return;
 209
 210	BT_DBG("hdev %p len %d", hdev, skb->len);
 211
 212	switch (bt_cb(skb)->pkt_type) {
 213	case HCI_COMMAND_PKT:
 214		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 215		break;
 216	case HCI_EVENT_PKT:
 217		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 218		break;
 219	case HCI_ACLDATA_PKT:
 220		if (bt_cb(skb)->incoming)
 221			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 222		else
 223			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 224		break;
 225	case HCI_SCODATA_PKT:
 226		if (bt_cb(skb)->incoming)
 227			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 228		else
 229			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 230		break;
 
 
 
 231	default:
 232		return;
 233	}
 234
 235	read_lock(&hci_sk_list.lock);
 
 
 
 236
 237	sk_for_each(sk, &hci_sk_list.head) {
 238		struct sk_buff *nskb;
 239
 240		if (sk->sk_state != BT_BOUND)
 241			continue;
 242
 243		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
 244			continue;
 245
 246		if (!skb_copy) {
 247			struct hci_mon_hdr *hdr;
 248
 249			/* Create a private copy with headroom */
 250			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
 251					       GFP_ATOMIC);
 252			if (!skb_copy)
 253				continue;
 254
 255			/* Put header before the data */
 256			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
 257			hdr->opcode = opcode;
 258			hdr->index = cpu_to_le16(hdev->id);
 259			hdr->len = cpu_to_le16(skb->len);
 260		}
 261
 262		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 263		if (!nskb)
 264			continue;
 265
 266		if (sock_queue_rcv_skb(sk, nskb))
 267			kfree_skb(nskb);
 268	}
 269
 270	read_unlock(&hci_sk_list.lock);
 271
 
 
 272	kfree_skb(skb_copy);
 273}
 274
 275static void send_monitor_event(struct sk_buff *skb)
 276{
 277	struct sock *sk;
 278
 279	BT_DBG("len %d", skb->len);
 280
 281	read_lock(&hci_sk_list.lock);
 282
 283	sk_for_each(sk, &hci_sk_list.head) {
 284		struct sk_buff *nskb;
 285
 286		if (sk->sk_state != BT_BOUND)
 287			continue;
 288
 289		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
 290			continue;
 291
 292		nskb = skb_clone(skb, GFP_ATOMIC);
 293		if (!nskb)
 294			continue;
 295
 296		if (sock_queue_rcv_skb(sk, nskb))
 297			kfree_skb(nskb);
 298	}
 299
 300	read_unlock(&hci_sk_list.lock);
 301}
 302
 303static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 304{
 305	struct hci_mon_hdr *hdr;
 306	struct hci_mon_new_index *ni;
 
 307	struct sk_buff *skb;
 308	__le16 opcode;
 309
 310	switch (event) {
 311	case HCI_DEV_REG:
 312		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 313		if (!skb)
 314			return NULL;
 315
 316		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 317		ni->type = hdev->dev_type;
 318		ni->bus = hdev->bus;
 319		bacpy(&ni->bdaddr, &hdev->bdaddr);
 320		memcpy(ni->name, hdev->name, 8);
 321
 322		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 323		break;
 324
 325	case HCI_DEV_UNREG:
 326		skb = bt_skb_alloc(0, GFP_ATOMIC);
 327		if (!skb)
 328			return NULL;
 329
 330		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 331		break;
 332
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 333	default:
 334		return NULL;
 335	}
 336
 337	__net_timestamp(skb);
 338
 339	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
 340	hdr->opcode = opcode;
 341	hdr->index = cpu_to_le16(hdev->id);
 342	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 343
 344	return skb;
 345}
 346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347static void send_monitor_replay(struct sock *sk)
 348{
 349	struct hci_dev *hdev;
 350
 351	read_lock(&hci_dev_list_lock);
 352
 353	list_for_each_entry(hdev, &hci_dev_list, list) {
 354		struct sk_buff *skb;
 355
 356		skb = create_monitor_event(hdev, HCI_DEV_REG);
 357		if (!skb)
 358			continue;
 359
 360		if (sock_queue_rcv_skb(sk, skb))
 361			kfree_skb(skb);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 362	}
 363
 364	read_unlock(&hci_dev_list_lock);
 365}
 366
 367/* Generate internal stack event */
 368static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 369{
 370	struct hci_event_hdr *hdr;
 371	struct hci_ev_stack_internal *ev;
 372	struct sk_buff *skb;
 373
 374	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 375	if (!skb)
 376		return;
 377
 378	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
 379	hdr->evt  = HCI_EV_STACK_INTERNAL;
 380	hdr->plen = sizeof(*ev) + dlen;
 381
 382	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
 383	ev->type = type;
 384	memcpy(ev->data, data, dlen);
 385
 386	bt_cb(skb)->incoming = 1;
 387	__net_timestamp(skb);
 388
 389	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 390	hci_send_to_sock(hdev, skb);
 391	kfree_skb(skb);
 392}
 393
 394void hci_sock_dev_event(struct hci_dev *hdev, int event)
 395{
 396	struct hci_ev_si_device ev;
 397
 398	BT_DBG("hdev %s event %d", hdev->name, event);
 399
 400	/* Send event to monitor */
 401	if (atomic_read(&monitor_promisc)) {
 402		struct sk_buff *skb;
 403
 
 404		skb = create_monitor_event(hdev, event);
 405		if (skb) {
 406			send_monitor_event(skb);
 
 407			kfree_skb(skb);
 408		}
 409	}
 410
 411	/* Send event to sockets */
 412	ev.event  = event;
 413	ev.dev_id = hdev->id;
 414	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 
 
 
 
 415
 416	if (event == HCI_DEV_UNREG) {
 417		struct sock *sk;
 418
 419		/* Detach sockets from device */
 420		read_lock(&hci_sk_list.lock);
 421		sk_for_each(sk, &hci_sk_list.head) {
 422			bh_lock_sock_nested(sk);
 423			if (hci_pi(sk)->hdev == hdev) {
 424				hci_pi(sk)->hdev = NULL;
 425				sk->sk_err = EPIPE;
 426				sk->sk_state = BT_OPEN;
 427				sk->sk_state_change(sk);
 428
 429				hci_dev_put(hdev);
 430			}
 431			bh_unlock_sock(sk);
 432		}
 433		read_unlock(&hci_sk_list.lock);
 434	}
 435}
 436
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 437static int hci_sock_release(struct socket *sock)
 438{
 439	struct sock *sk = sock->sk;
 440	struct hci_dev *hdev;
 441
 442	BT_DBG("sock %p sk %p", sock, sk);
 443
 444	if (!sk)
 445		return 0;
 446
 447	hdev = hci_pi(sk)->hdev;
 448
 449	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
 450		atomic_dec(&monitor_promisc);
 451
 452	bt_sock_unlink(&hci_sk_list, sk);
 453
 454	if (hdev) {
 455		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 
 
 
 
 
 
 
 
 
 
 
 456			mgmt_index_added(hdev);
 457			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
 458			hci_dev_close(hdev->id);
 459		}
 460
 461		atomic_dec(&hdev->promisc);
 462		hci_dev_put(hdev);
 463	}
 464
 465	sock_orphan(sk);
 466
 467	skb_queue_purge(&sk->sk_receive_queue);
 468	skb_queue_purge(&sk->sk_write_queue);
 469
 470	sock_put(sk);
 471	return 0;
 472}
 473
 474static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 475{
 476	bdaddr_t bdaddr;
 477	int err;
 478
 479	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 480		return -EFAULT;
 481
 482	hci_dev_lock(hdev);
 483
 484	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 485
 486	hci_dev_unlock(hdev);
 487
 488	return err;
 489}
 490
 491static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 492{
 493	bdaddr_t bdaddr;
 494	int err;
 495
 496	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 497		return -EFAULT;
 498
 499	hci_dev_lock(hdev);
 500
 501	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 502
 503	hci_dev_unlock(hdev);
 504
 505	return err;
 506}
 507
 508/* Ioctls that require bound socket */
 509static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 510				unsigned long arg)
 511{
 512	struct hci_dev *hdev = hci_pi(sk)->hdev;
 513
 514	if (!hdev)
 515		return -EBADFD;
 516
 517	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
 518		return -EBUSY;
 519
 520	if (hdev->dev_type != HCI_BREDR)
 521		return -EOPNOTSUPP;
 522
 523	switch (cmd) {
 524	case HCISETRAW:
 525		if (!capable(CAP_NET_ADMIN))
 526			return -EPERM;
 527
 528		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 529			return -EPERM;
 530
 531		if (arg)
 532			set_bit(HCI_RAW, &hdev->flags);
 533		else
 534			clear_bit(HCI_RAW, &hdev->flags);
 535
 536		return 0;
 537
 538	case HCIGETCONNINFO:
 539		return hci_get_conn_info(hdev, (void __user *) arg);
 540
 541	case HCIGETAUTHINFO:
 542		return hci_get_auth_info(hdev, (void __user *) arg);
 543
 544	case HCIBLOCKADDR:
 545		if (!capable(CAP_NET_ADMIN))
 546			return -EPERM;
 547		return hci_sock_blacklist_add(hdev, (void __user *) arg);
 548
 549	case HCIUNBLOCKADDR:
 550		if (!capable(CAP_NET_ADMIN))
 551			return -EPERM;
 552		return hci_sock_blacklist_del(hdev, (void __user *) arg);
 553	}
 554
 555	return -ENOIOCTLCMD;
 556}
 557
 558static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 559			  unsigned long arg)
 560{
 561	void __user *argp = (void __user *) arg;
 562	struct sock *sk = sock->sk;
 563	int err;
 564
 565	BT_DBG("cmd %x arg %lx", cmd, arg);
 566
 567	lock_sock(sk);
 568
 569	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 570		err = -EBADFD;
 571		goto done;
 572	}
 573
 574	release_sock(sk);
 575
 576	switch (cmd) {
 577	case HCIGETDEVLIST:
 578		return hci_get_dev_list(argp);
 579
 580	case HCIGETDEVINFO:
 581		return hci_get_dev_info(argp);
 582
 583	case HCIGETCONNLIST:
 584		return hci_get_conn_list(argp);
 585
 586	case HCIDEVUP:
 587		if (!capable(CAP_NET_ADMIN))
 588			return -EPERM;
 589		return hci_dev_open(arg);
 590
 591	case HCIDEVDOWN:
 592		if (!capable(CAP_NET_ADMIN))
 593			return -EPERM;
 594		return hci_dev_close(arg);
 595
 596	case HCIDEVRESET:
 597		if (!capable(CAP_NET_ADMIN))
 598			return -EPERM;
 599		return hci_dev_reset(arg);
 600
 601	case HCIDEVRESTAT:
 602		if (!capable(CAP_NET_ADMIN))
 603			return -EPERM;
 604		return hci_dev_reset_stat(arg);
 605
 606	case HCISETSCAN:
 607	case HCISETAUTH:
 608	case HCISETENCRYPT:
 609	case HCISETPTYPE:
 610	case HCISETLINKPOL:
 611	case HCISETLINKMODE:
 612	case HCISETACLMTU:
 613	case HCISETSCOMTU:
 614		if (!capable(CAP_NET_ADMIN))
 615			return -EPERM;
 616		return hci_dev_cmd(cmd, argp);
 617
 618	case HCIINQUIRY:
 619		return hci_inquiry(argp);
 620	}
 621
 622	lock_sock(sk);
 623
 624	err = hci_sock_bound_ioctl(sk, cmd, arg);
 625
 626done:
 627	release_sock(sk);
 628	return err;
 629}
 630
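/* The device-global ioctls handled above need no bound socket, only
 * CAP_NET_ADMIN, and take the device index as the ioctl argument
 * itself rather than through a pointer. A minimal sketch, assuming
 * the BlueZ <bluetooth/hci.h> definitions; error handling is elided:
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int set_dev_power(int dev_id, int up)
{
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	/* fails with errno EPERM without CAP_NET_ADMIN, as above */
	int err = ioctl(fd, up ? HCIDEVUP : HCIDEVDOWN, dev_id);

	close(fd);
	return err;
}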
 631static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 632			 int addr_len)
 633{
 634	struct sockaddr_hci haddr;
 635	struct sock *sk = sock->sk;
 636	struct hci_dev *hdev = NULL;
 637	int len, err = 0;
 638
 639	BT_DBG("sock %p sk %p", sock, sk);
 640
 641	if (!addr)
 642		return -EINVAL;
 643
 644	memset(&haddr, 0, sizeof(haddr));
 645	len = min_t(unsigned int, sizeof(haddr), addr_len);
 646	memcpy(&haddr, addr, len);
 647
 648	if (haddr.hci_family != AF_BLUETOOTH)
 649		return -EINVAL;
 650
 651	lock_sock(sk);
 652
 653	if (sk->sk_state == BT_BOUND) {
 654		err = -EALREADY;
 655		goto done;
 656	}
 657
 658	switch (haddr.hci_channel) {
 659	case HCI_CHANNEL_RAW:
 660		if (hci_pi(sk)->hdev) {
 661			err = -EALREADY;
 662			goto done;
 663		}
 664
 665		if (haddr.hci_dev != HCI_DEV_NONE) {
 666			hdev = hci_dev_get(haddr.hci_dev);
 667			if (!hdev) {
 668				err = -ENODEV;
 669				goto done;
 670			}
 671
 672			atomic_inc(&hdev->promisc);
 673		}
 674
 675		hci_pi(sk)->hdev = hdev;
 676		break;
 677
 678	case HCI_CHANNEL_USER:
 679		if (hci_pi(sk)->hdev) {
 680			err = -EALREADY;
 681			goto done;
 682		}
 683
 684		if (haddr.hci_dev == HCI_DEV_NONE) {
 685			err = -EINVAL;
 686			goto done;
 687		}
 688
 689		if (!capable(CAP_NET_ADMIN)) {
 690			err = -EPERM;
 691			goto done;
 692		}
 693
 694		hdev = hci_dev_get(haddr.hci_dev);
 695		if (!hdev) {
 696			err = -ENODEV;
 697			goto done;
 698		}
 699
 700		if (test_bit(HCI_UP, &hdev->flags) ||
 701		    test_bit(HCI_INIT, &hdev->flags) ||
 702		    test_bit(HCI_SETUP, &hdev->dev_flags)) {
 703			err = -EBUSY;
 704			hci_dev_put(hdev);
 705			goto done;
 706		}
 707
 708		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
 709			err = -EUSERS;
 710			hci_dev_put(hdev);
 711			goto done;
 712		}
 713
 714		mgmt_index_removed(hdev);
 715
 716		err = hci_dev_open(hdev->id);
 717		if (err) {
 718			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
 719			mgmt_index_added(hdev);
 720			hci_dev_put(hdev);
 721			goto done;
 722		}
 723
 724		atomic_inc(&hdev->promisc);
 725
 726		hci_pi(sk)->hdev = hdev;
 727		break;
 728
 729	case HCI_CHANNEL_CONTROL:
 730		if (haddr.hci_dev != HCI_DEV_NONE) {
 731			err = -EINVAL;
 732			goto done;
 733		}
 734
 735		if (!capable(CAP_NET_ADMIN)) {
 736			err = -EPERM;
 737			goto done;
 738		}
 739
 740		break;
 741
 742	case HCI_CHANNEL_MONITOR:
 743		if (haddr.hci_dev != HCI_DEV_NONE) {
 744			err = -EINVAL;
 745			goto done;
 746		}
 747
 748		if (!capable(CAP_NET_RAW)) {
 749			err = -EPERM;
 750			goto done;
 751		}
 752
 753		send_monitor_replay(sk);
 754
 755		atomic_inc(&monitor_promisc);
 756		break;
 757
 758	default:
 759		err = -EINVAL;
 760		goto done;
 761	}
 762
 763
 764	hci_pi(sk)->channel = haddr.hci_channel;
 765	sk->sk_state = BT_BOUND;
 766
 767done:
 768	release_sock(sk);
 769	return err;
 770}
 771
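/* Binding selects the socket's personality. A sketch of opening the
 * monitor channel, which per the code above requires CAP_NET_RAW,
 * must use HCI_DEV_NONE, and immediately replays the current device
 * list; assumes a BlueZ <bluetooth/hci.h> recent enough to declare
 * hci_channel in struct sockaddr_hci and HCI_CHANNEL_MONITOR:
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int open_monitor(void)
{
	struct sockaddr_hci addr;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	memset(&addr, 0, sizeof(addr));
	addr.hci_family  = AF_BLUETOOTH;
	addr.hci_dev     = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* hci_mon_hdr-framed traffic can be read here */
}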
 772static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 773			    int *addr_len, int peer)
 774{
 775	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
 776	struct sock *sk = sock->sk;
 777	struct hci_dev *hdev;
 778	int err = 0;
 779
 780	BT_DBG("sock %p sk %p", sock, sk);
 781
 782	if (peer)
 783		return -EOPNOTSUPP;
 784
 785	lock_sock(sk);
 786
 787	hdev = hci_pi(sk)->hdev;
 788	if (!hdev) {
 789		err = -EBADFD;
 790		goto done;
 791	}
 792
 793	*addr_len = sizeof(*haddr);
 794	haddr->hci_family = AF_BLUETOOTH;
 795	haddr->hci_dev    = hdev->id;
 796	haddr->hci_channel = hci_pi(sk)->channel;
 797
 798done:
 799	release_sock(sk);
 800	return err;
 801}
 802
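/* getsockname() maps onto the function above; a short sketch that
 * reports which adapter a raw socket ended up bound to (same BlueZ
 * headers as in the earlier sketches):
 */
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int bound_dev_id(int fd)
{
	struct sockaddr_hci addr;
	socklen_t len = sizeof(addr);

	/* fails with errno EBADFD if the socket has no device */
	if (getsockname(fd, (struct sockaddr *) &addr, &len) < 0)
		return -1;
	return addr.hci_dev;
}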
 803static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
 804			  struct sk_buff *skb)
 805{
 806	__u32 mask = hci_pi(sk)->cmsg_mask;
 807
 808	if (mask & HCI_CMSG_DIR) {
 809		int incoming = bt_cb(skb)->incoming;
 810		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
 811			 &incoming);
 812	}
 813
 814	if (mask & HCI_CMSG_TSTAMP) {
 815#ifdef CONFIG_COMPAT
 816		struct compat_timeval ctv;
 817#endif
 818		struct timeval tv;
 819		void *data;
 820		int len;
 821
 822		skb_get_timestamp(skb, &tv);
 823
 824		data = &tv;
 825		len = sizeof(tv);
 826#ifdef CONFIG_COMPAT
 827		if (!COMPAT_USE_64BIT_TIME &&
 828		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
 829			ctv.tv_sec = tv.tv_sec;
 830			ctv.tv_usec = tv.tv_usec;
 831			data = &ctv;
 832			len = sizeof(ctv);
 833		}
 834#endif
 835
 836		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
 837	}
 838}
 839
 840static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 841			    struct msghdr *msg, size_t len, int flags)
 842{
 843	int noblock = flags & MSG_DONTWAIT;
 844	struct sock *sk = sock->sk;
 845	struct sk_buff *skb;
 846	int copied, err;
 847
 848	BT_DBG("sock %p, sk %p", sock, sk);
 849
 850	if (flags & MSG_OOB)
 851		return -EOPNOTSUPP;
 852
 853	if (sk->sk_state == BT_CLOSED)
 854		return 0;
 855
 856	skb = skb_recv_datagram(sk, flags, noblock, &err);
 857	if (!skb)
 858		return err;
 859
 860	copied = skb->len;
 861	if (len < copied) {
 862		msg->msg_flags |= MSG_TRUNC;
 863		copied = len;
 864	}
 865
 866	skb_reset_transport_header(skb);
 867	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 868
 869	switch (hci_pi(sk)->channel) {
 870	case HCI_CHANNEL_RAW:
 871		hci_sock_cmsg(sk, msg, skb);
 872		break;
 873	case HCI_CHANNEL_USER:
 874	case HCI_CHANNEL_CONTROL:
 875	case HCI_CHANNEL_MONITOR:
 876		sock_recv_timestamp(msg, sk, skb);
 877		break;
 878	}
 879
 880	skb_free_datagram(sk, skb);
 881
 882	return err ? : copied;
 883}
 884
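/* The ancillary data attached by hci_sock_cmsg() above is retrieved
 * with the standard CMSG_* accessors. A sketch reading one frame
 * together with its direction flag and timestamp, assuming the
 * socket already has HCI_DATA_DIR and HCI_TIME_STAMP enabled (see
 * hci_sock_setsockopt() below); error handling is elided.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

void read_one_frame(int fd)
{
	unsigned char data[HCI_MAX_FRAME_SIZE], ctrl[64];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n <= 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level != SOL_HCI)
			continue;
		if (cmsg->cmsg_type == HCI_CMSG_DIR) {
			int incoming;

			memcpy(&incoming, CMSG_DATA(cmsg), sizeof(incoming));
			printf("%s ", incoming ? "rx" : "tx");
		} else if (cmsg->cmsg_type == HCI_CMSG_TSTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("@%ld.%06ld ", (long) tv.tv_sec,
			       (long) tv.tv_usec);
		}
	}
	printf("%zd bytes\n", n);
}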
 885static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 886			    struct msghdr *msg, size_t len)
 887{
 888	struct sock *sk = sock->sk;
 889	struct hci_dev *hdev;
 890	struct sk_buff *skb;
 891	int err;
 892
 893	BT_DBG("sock %p sk %p", sock, sk);
 894
 895	if (msg->msg_flags & MSG_OOB)
 896		return -EOPNOTSUPP;
 897
 898	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
 899		return -EINVAL;
 900
 901	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
 902		return -EINVAL;
 903
 904	lock_sock(sk);
 905
 906	switch (hci_pi(sk)->channel) {
 907	case HCI_CHANNEL_RAW:
 908	case HCI_CHANNEL_USER:
 909		break;
 910	case HCI_CHANNEL_CONTROL:
 911		err = mgmt_control(sk, msg, len);
 912		goto done;
 913	case HCI_CHANNEL_MONITOR:
 914		err = -EOPNOTSUPP;
 915		goto done;
 916	default:
 917		err = -EINVAL;
 918		goto done;
 919	}
 920
 921	hdev = hci_pi(sk)->hdev;
 922	if (!hdev) {
 923		err = -EBADFD;
 924		goto done;
 925	}
 926
 927	if (!test_bit(HCI_UP, &hdev->flags)) {
 928		err = -ENETDOWN;
 929		goto done;
 930	}
 931
 932	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
 933	if (!skb)
 934		goto done;
 935
 936	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 937		err = -EFAULT;
 938		goto drop;
 939	}
 940
 941	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
 942	skb_pull(skb, 1);
 943
 944	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 945		/* No permission check is needed for user channel
 946		 * since that gets enforced when binding the socket.
 947		 *
 948		 * However check that the packet type is valid.
 949		 */
 950		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
 951		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
 952		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
 953			err = -EINVAL;
 954			goto drop;
 955		}
 956
 957		skb_queue_tail(&hdev->raw_q, skb);
 958		queue_work(hdev->workqueue, &hdev->tx_work);
 959	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
 960		u16 opcode = get_unaligned_le16(skb->data);
 961		u16 ogf = hci_opcode_ogf(opcode);
 962		u16 ocf = hci_opcode_ocf(opcode);
 963
 964		if (((ogf > HCI_SFLT_MAX_OGF) ||
 965		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
 966				   &hci_sec_filter.ocf_mask[ogf])) &&
 967		    !capable(CAP_NET_RAW)) {
 968			err = -EPERM;
 969			goto drop;
 970		}
 971
 972		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
 973			skb_queue_tail(&hdev->raw_q, skb);
 974			queue_work(hdev->workqueue, &hdev->tx_work);
 975		} else {
 976			/* Stand-alone HCI commands must be flagged as
 977			 * single-command requests.
 978			 */
 979			bt_cb(skb)->req.start = true;
 980
 981			skb_queue_tail(&hdev->cmd_q, skb);
 982			queue_work(hdev->workqueue, &hdev->cmd_work);
 983		}
 984	} else {
 985		if (!capable(CAP_NET_RAW)) {
 986			err = -EPERM;
 987			goto drop;
 988		}
 989
 990		skb_queue_tail(&hdev->raw_q, skb);
 991		queue_work(hdev->workqueue, &hdev->tx_work);
 992	}
 993
 994	err = len;
 995
 996done:
 997	release_sock(sk);
 998	return err;
 999
1000drop:
1001	kfree_skb(skb);
1002	goto done;
1003}
1004
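/* On the way out, the first byte of each write selects the packet
 * type and the remainder is the frame; for commands, the security
 * filter above decides whether CAP_NET_RAW is required. A sketch
 * sending Read Local Version Information (OGF 0x04, OCF 0x0001, no
 * parameters), one of the commands the filter admits without
 * privileges:
 */
#include <unistd.h>

int read_local_version(int fd)
{
	/* packet type, opcode LSB, opcode MSB, parameter length */
	static const unsigned char cmd[] = {
		0x01,		/* HCI_COMMAND_PKT */
		0x01, 0x10,	/* opcode 0x1001, little endian */
		0x00		/* plen */
	};

	return write(fd, cmd, sizeof(cmd)) == (ssize_t) sizeof(cmd) ? 0 : -1;
}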
1005static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1006			       char __user *optval, unsigned int len)
1007{
1008	struct hci_ufilter uf = { .opcode = 0 };
1009	struct sock *sk = sock->sk;
1010	int err = 0, opt = 0;
1011
1012	BT_DBG("sk %p, opt %d", sk, optname);
1013
1014	lock_sock(sk);
1015
1016	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1017		err = -EBADFD;
1018		goto done;
1019	}
1020
1021	switch (optname) {
1022	case HCI_DATA_DIR:
1023		if (get_user(opt, (int __user *)optval)) {
1024			err = -EFAULT;
1025			break;
1026		}
1027
1028		if (opt)
1029			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1030		else
1031			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1032		break;
1033
1034	case HCI_TIME_STAMP:
1035		if (get_user(opt, (int __user *)optval)) {
1036			err = -EFAULT;
1037			break;
1038		}
1039
1040		if (opt)
1041			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1042		else
1043			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1044		break;
1045
1046	case HCI_FILTER:
1047		{
1048			struct hci_filter *f = &hci_pi(sk)->filter;
1049
1050			uf.type_mask = f->type_mask;
1051			uf.opcode    = f->opcode;
1052			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1053			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1054		}
1055
1056		len = min_t(unsigned int, len, sizeof(uf));
1057		if (copy_from_user(&uf, optval, len)) {
1058			err = -EFAULT;
1059			break;
1060		}
1061
1062		if (!capable(CAP_NET_RAW)) {
1063			uf.type_mask &= hci_sec_filter.type_mask;
1064			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1065			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1066		}
1067
1068		{
1069			struct hci_filter *f = &hci_pi(sk)->filter;
1070
1071			f->type_mask = uf.type_mask;
1072			f->opcode    = uf.opcode;
1073			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1074			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1075		}
1076		break;
1077
1078	default:
1079		err = -ENOPROTOOPT;
1080		break;
1081	}
1082
1083done:
1084	release_sock(sk);
1085	return err;
1086}
1087
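/* Enabling the two cmsg options handled above is a plain setsockopt()
 * on SOL_HCI with an int flag; a sketch matching the recvmsg()
 * example earlier:
 */
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int enable_metadata(int fd)
{
	int on = 1;

	if (setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &on, sizeof(on)) < 0)
		return -1;
	return setsockopt(fd, SOL_HCI, HCI_TIME_STAMP, &on, sizeof(on));
}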
1088static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1089			       char __user *optval, int __user *optlen)
1090{
1091	struct hci_ufilter uf;
1092	struct sock *sk = sock->sk;
1093	int len, opt, err = 0;
1094
1095	BT_DBG("sk %p, opt %d", sk, optname);
1096
1097	if (get_user(len, optlen))
1098		return -EFAULT;
1099
1100	lock_sock(sk);
1101
1102	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1103		err = -EBADFD;
1104		goto done;
1105	}
1106
1107	switch (optname) {
1108	case HCI_DATA_DIR:
1109		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1110			opt = 1;
1111		else
1112			opt = 0;
1113
1114		if (put_user(opt, optval))
1115			err = -EFAULT;
1116		break;
1117
1118	case HCI_TIME_STAMP:
1119		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1120			opt = 1;
1121		else
1122			opt = 0;
1123
1124		if (put_user(opt, optval))
1125			err = -EFAULT;
1126		break;
1127
1128	case HCI_FILTER:
1129		{
1130			struct hci_filter *f = &hci_pi(sk)->filter;
1131
1132			memset(&uf, 0, sizeof(uf));
1133			uf.type_mask = f->type_mask;
1134			uf.opcode    = f->opcode;
1135			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1136			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1137		}
1138
1139		len = min_t(unsigned int, len, sizeof(uf));
1140		if (copy_to_user(optval, &uf, len))
1141			err = -EFAULT;
1142		break;
1143
1144	default:
1145		err = -ENOPROTOOPT;
1146		break;
1147	}
1148
1149done:
1150	release_sock(sk);
1151	return err;
1152}
1153
1154static const struct proto_ops hci_sock_ops = {
1155	.family		= PF_BLUETOOTH,
1156	.owner		= THIS_MODULE,
1157	.release	= hci_sock_release,
1158	.bind		= hci_sock_bind,
1159	.getname	= hci_sock_getname,
1160	.sendmsg	= hci_sock_sendmsg,
1161	.recvmsg	= hci_sock_recvmsg,
1162	.ioctl		= hci_sock_ioctl,
1163	.poll		= datagram_poll,
1164	.listen		= sock_no_listen,
1165	.shutdown	= sock_no_shutdown,
1166	.setsockopt	= hci_sock_setsockopt,
1167	.getsockopt	= hci_sock_getsockopt,
1168	.connect	= sock_no_connect,
1169	.socketpair	= sock_no_socketpair,
1170	.accept		= sock_no_accept,
1171	.mmap		= sock_no_mmap
1172};
1173
1174static struct proto hci_sk_proto = {
1175	.name		= "HCI",
1176	.owner		= THIS_MODULE,
1177	.obj_size	= sizeof(struct hci_pinfo)
1178};
1179
1180static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1181			   int kern)
1182{
1183	struct sock *sk;
1184
1185	BT_DBG("sock %p", sock);
1186
1187	if (sock->type != SOCK_RAW)
1188		return -ESOCKTNOSUPPORT;
1189
1190	sock->ops = &hci_sock_ops;
1191
1192	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1193	if (!sk)
1194		return -ENOMEM;
1195
1196	sock_init_data(sock, sk);
1197
1198	sock_reset_flag(sk, SOCK_ZAPPED);
1199
1200	sk->sk_protocol = protocol;
1201
1202	sock->state = SS_UNCONNECTED;
1203	sk->sk_state = BT_OPEN;
1204
1205	bt_sock_link(&hci_sk_list, sk);
1206	return 0;
1207}
1208
1209static const struct net_proto_family hci_sock_family_ops = {
1210	.family	= PF_BLUETOOTH,
1211	.owner	= THIS_MODULE,
1212	.create	= hci_sock_create,
1213};
1214
1215int __init hci_sock_init(void)
1216{
1217	int err;
1218
1219	err = proto_register(&hci_sk_proto, 0);
1220	if (err < 0)
1221		return err;
1222
1223	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1224	if (err < 0) {
1225		BT_ERR("HCI socket registration failed");
1226		goto error;
1227	}
1228
1229	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1230	if (err < 0) {
1231		BT_ERR("Failed to create HCI proc file");
1232		bt_sock_unregister(BTPROTO_HCI);
1233		goto error;
1234	}
1235
1236	BT_INFO("HCI socket layer initialized");
1237
1238	return 0;
1239
1240error:
1241	proto_unregister(&hci_sk_proto);
1242	return err;
1243}
1244
1245void hci_sock_cleanup(void)
1246{
1247	bt_procfs_cleanup(&init_net, "hci");
1248	bt_sock_unregister(BTPROTO_HCI);
1249	proto_unregister(&hci_sk_proto);
1250}