/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
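
/* How a command is looked up in the table above: the 16-bit HCI opcode is
 * split into OGF (upper 6 bits) and OCF (lower 10 bits), and the OCF then
 * selects one bit inside ocf_mask[ogf] via hci_test_bit(). A hedged worked
 * example (opcode value from the Bluetooth Core spec, for illustration only):
 *
 *	HCI_OP_INQUIRY = 0x0401: ogf = 0x0401 >> 10 = 0x01 (OGF_LINK_CTL),
 *	ocf = 0x0401 & 0x03ff = 0x0001. hci_test_bit(1, ocf_mask[1]) tests
 *	bit 1 of 0xbe000006, which is set, so unprivileged raw sockets may
 *	send Inquiry without CAP_NET_RAW (see hci_sock_sendmsg() below).
 */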

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
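
/* Userspace sketch of installing a per-socket filter (illustrative only;
 * the kernel copies a struct hci_ufilter in hci_sock_setsockopt_old()
 * below, which userspace headers typically expose as struct hci_filter):
 *
 *	struct hci_ufilter flt = { 0 };
 *
 *	flt.type_mask = 1 << HCI_EVENT_PKT;
 *	flt.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW the requested masks are additionally clamped by
 * hci_sec_filter above.
 */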

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials are set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials are set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds are already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}

static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
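
/* On the wire each monitor frame is the 6-byte struct hci_mon_hdr built
 * above (opcode, controller index and payload length, all little endian)
 * followed by the payload. A minimal reader sketch (illustrative only;
 * the btmon(1) tool from BlueZ is the canonical consumer of this channel):
 *
 *	struct hci_mon_hdr hdr;
 *	unsigned char buf[HCI_MON_HDR_SIZE + HCI_MAX_FRAME_SIZE];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	memcpy(&hdr, buf, HCI_MON_HDR_SIZE);
 *	le16_to_cpu(hdr.len) bytes of payload follow the header.
 */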

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
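
/* Payload layout of the CTRL_OPEN message built above (14 + TASK_COMM_LEN
 * bytes, little endian, as can be read off the skb_put() sequence):
 *
 *	cookie[4] | format[2] | version[3] | flags[4] |
 *	comm_len[1] | comm[TASK_COMM_LEN]
 */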

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered, since all the cleanup will already
			 * have been completed and hdev will get released
			 * when we put below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is sent only once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
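
/* Userspace sketch of driving these ioctls (illustrative only; the
 * state-changing commands require CAP_NET_ADMIN, as checked above,
 * and the numeric argument is the controller index):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(fd, HCIDEVUP, 0);		brings hci0 up
 *	ioctl(fd, HCIDEVDOWN, 0);	brings hci0 back down
 */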

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * there has already been an ioctl issued against an
			 * unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
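
/* Userspace sketch of taking exclusive control of a controller through the
 * user channel (illustrative only; needs CAP_NET_ADMIN as checked above,
 * and hci_dev = 0 selects hci0):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_USER,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */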

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
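
/* Userspace sketch of consuming the ancillary data emitted above
 * (illustrative only; HCI_DATA_DIR must first be enabled via
 * setsockopt() at SOL_HCI, handled further below, and msg is a
 * struct msghdr prepared with control buffer space):
 *
 *	struct cmsghdr *c;
 *	int incoming;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level != SOL_HCI)
 *			continue;
 *		if (c->cmsg_type == HCI_CMSG_DIR)
 *			memcpy(&incoming, CMSG_DATA(c), sizeof(incoming));
 *	}
 */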

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}

static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
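
/* Userspace sketch of raising the command MTU on a management (mgmt)
 * socket (illustrative only; RAW and USER channels reject this above,
 * and the chosen value of 1024 is just an example):
 *
 *	uint16_t mtu = 1024;
 *
 *	setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
 */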
2062
2063static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2064				   char __user *optval, int __user *optlen)
2065{
2066	struct hci_ufilter uf;
2067	struct sock *sk = sock->sk;
2068	int len, opt, err = 0;
2069
2070	BT_DBG("sk %p, opt %d", sk, optname);
2071
 
 
 
2072	if (get_user(len, optlen))
2073		return -EFAULT;
2074
2075	lock_sock(sk);
2076
2077	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2078		err = -EBADFD;
2079		goto done;
2080	}
2081
2082	switch (optname) {
2083	case HCI_DATA_DIR:
2084		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2085			opt = 1;
2086		else
2087			opt = 0;
2088
2089		if (put_user(opt, optval))
2090			err = -EFAULT;
2091		break;
2092
2093	case HCI_TIME_STAMP:
2094		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2095			opt = 1;
2096		else
2097			opt = 0;
2098
2099		if (put_user(opt, optval))
2100			err = -EFAULT;
2101		break;
2102
2103	case HCI_FILTER:
2104		{
2105			struct hci_filter *f = &hci_pi(sk)->filter;
2106
2107			memset(&uf, 0, sizeof(uf));
2108			uf.type_mask = f->type_mask;
2109			uf.opcode    = f->opcode;
2110			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2111			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2112		}
2113
2114		len = min_t(unsigned int, len, sizeof(uf));
2115		if (copy_to_user(optval, &uf, len))
2116			err = -EFAULT;
2117		break;
2118
2119	default:
2120		err = -ENOPROTOOPT;
2121		break;
2122	}
2123
2124done:
2125	release_sock(sk);
2126	return err;
2127}
2128
2129static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2130			       char __user *optval, int __user *optlen)
2131{
2132	struct sock *sk = sock->sk;
2133	int err = 0;
2134
2135	BT_DBG("sk %p, opt %d", sk, optname);
2136
2137	if (level == SOL_HCI)
2138		return hci_sock_getsockopt_old(sock, level, optname, optval,
2139					       optlen);
2140
2141	if (level != SOL_BLUETOOTH)
2142		return -ENOPROTOOPT;
2143
2144	lock_sock(sk);
2145
2146	switch (optname) {
2147	case BT_SNDMTU:
2148	case BT_RCVMTU:
2149		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2150			err = -EFAULT;
2151		break;
2152
2153	default:
2154		err = -ENOPROTOOPT;
2155		break;
2156	}
2157
2158	release_sock(sk);
2159	return err;
2160}
2161
2162static void hci_sock_destruct(struct sock *sk)
2163{
2164	mgmt_cleanup(sk);
2165	skb_queue_purge(&sk->sk_receive_queue);
2166	skb_queue_purge(&sk->sk_write_queue);
2167}
2168
2169static const struct proto_ops hci_sock_ops = {
2170	.family		= PF_BLUETOOTH,
2171	.owner		= THIS_MODULE,
2172	.release	= hci_sock_release,
2173	.bind		= hci_sock_bind,
2174	.getname	= hci_sock_getname,
2175	.sendmsg	= hci_sock_sendmsg,
2176	.recvmsg	= hci_sock_recvmsg,
2177	.ioctl		= hci_sock_ioctl,
2178#ifdef CONFIG_COMPAT
2179	.compat_ioctl	= hci_sock_compat_ioctl,
2180#endif
2181	.poll		= datagram_poll,
2182	.listen		= sock_no_listen,
2183	.shutdown	= sock_no_shutdown,
2184	.setsockopt	= hci_sock_setsockopt,
2185	.getsockopt	= hci_sock_getsockopt,
2186	.connect	= sock_no_connect,
2187	.socketpair	= sock_no_socketpair,
2188	.accept		= sock_no_accept,
2189	.mmap		= sock_no_mmap
2190};
2191
2192static struct proto hci_sk_proto = {
2193	.name		= "HCI",
2194	.owner		= THIS_MODULE,
2195	.obj_size	= sizeof(struct hci_pinfo)
2196};
2197
2198static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2199			   int kern)
2200{
2201	struct sock *sk;
2202
2203	BT_DBG("sock %p", sock);
2204
2205	if (sock->type != SOCK_RAW)
2206		return -ESOCKTNOSUPPORT;
2207
2208	sock->ops = &hci_sock_ops;
2209
2210	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
2211			   kern);
2212	if (!sk)
2213		return -ENOMEM;
2214
2215	sock->state = SS_UNCONNECTED;
2216	sk->sk_destruct = hci_sock_destruct;
2217
2218	bt_sock_link(&hci_sk_list, sk);
2219	return 0;
2220}
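
/* A tiny userspace counterpart to hci_sock_create() above: only
 * SOCK_RAW reaches this function successfully, any other socket type is
 * rejected with ESOCKTNOSUPPORT. BTPROTO_HCI (1) follows the BlueZ
 * headers and is an assumption here.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI 1

int main(void)
{
	int ok  = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	int bad = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_HCI);

	printf("raw fd %d, stream errno %d (%s)\n",
	       ok, bad < 0 ? errno : 0, bad < 0 ? "expected" : "unexpected");

	if (ok >= 0)
		close(ok);
	return 0;
}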
2221
2222static const struct net_proto_family hci_sock_family_ops = {
2223	.family	= PF_BLUETOOTH,
2224	.owner	= THIS_MODULE,
2225	.create	= hci_sock_create,
2226};
2227
2228int __init hci_sock_init(void)
2229{
2230	int err;
2231
2232	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2233
2234	err = proto_register(&hci_sk_proto, 0);
2235	if (err < 0)
2236		return err;
2237
2238	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2239	if (err < 0) {
2240		BT_ERR("HCI socket registration failed");
2241		goto error;
2242	}
2243
2244	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2245	if (err < 0) {
2246		BT_ERR("Failed to create HCI proc file");
2247		bt_sock_unregister(BTPROTO_HCI);
2248		goto error;
2249	}
2250
2251	BT_INFO("HCI socket layer initialized");
2252
2253	return 0;
2254
2255error:
2256	proto_unregister(&hci_sk_proto);
2257	return err;
2258}
2259
2260void hci_sock_cleanup(void)
2261{
2262	bt_procfs_cleanup(&init_net, "hci");
2263	bt_sock_unregister(BTPROTO_HCI);
2264	proto_unregister(&hci_sk_proto);
2265}
v4.10.11
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u32             cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60};
  61
  62void hci_sock_set_flag(struct sock *sk, int nr)
  63{
  64	set_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67void hci_sock_clear_flag(struct sock *sk, int nr)
  68{
  69	clear_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72int hci_sock_test_flag(struct sock *sk, int nr)
  73{
  74	return test_bit(nr, &hci_pi(sk)->flags);
  75}
  76
  77unsigned short hci_sock_get_channel(struct sock *sk)
  78{
  79	return hci_pi(sk)->channel;
  80}
  81
  82u32 hci_sock_get_cookie(struct sock *sk)
  83{
  84	return hci_pi(sk)->cookie;
  85}
  86
  87static bool hci_sock_gen_cookie(struct sock *sk)
  88{
  89	int id = hci_pi(sk)->cookie;
  90
  91	if (!id) {
  92		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
  93		if (id < 0)
  94			id = 0xffffffff;
  95
  96		hci_pi(sk)->cookie = id;
  97		get_task_comm(hci_pi(sk)->comm, current);
  98		return true;
  99	}
 100
 101	return false;
 102}
 103
 104static void hci_sock_free_cookie(struct sock *sk)
 105{
 106	int id = hci_pi(sk)->cookie;
 107
 108	if (id) {
 109		hci_pi(sk)->cookie = 0xffffffff;
 110		ida_simple_remove(&sock_cookie_ida, id);
 111	}
 112}
 113
 114static inline int hci_test_bit(int nr, const void *addr)
 115{
 116	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 117}
 118
 119/* Security filter */
 120#define HCI_SFLT_MAX_OGF  5
 121
 122struct hci_sec_filter {
 123	__u32 type_mask;
 124	__u32 event_mask[2];
 125	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 126};
 127
 128static const struct hci_sec_filter hci_sec_filter = {
 129	/* Packet types */
 130	0x10,
 131	/* Events */
 132	{ 0x1000d9fe, 0x0000b00c },
 133	/* Commands */
 134	{
 135		{ 0x0 },
 136		/* OGF_LINK_CTL */
 137		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 138		/* OGF_LINK_POLICY */
 139		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 140		/* OGF_HOST_CTL */
 141		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 142		/* OGF_INFO_PARAM */
 143		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 144		/* OGF_STATUS_PARAM */
 145		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 146	}
 147};
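
/* A small host-side sketch of the bitmap test used by hci_test_bit()
 * and the hci_sec_filter event mask above; the mask values are copied
 * from this file, the helper reproduces the same word/bit arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

static int test_bit32(int nr, const uint32_t *addr)
{
	/* same math as hci_test_bit(): word nr >> 5, bit nr & 31 */
	return (addr[nr >> 5] >> (nr & 31)) & 1;
}

int main(void)
{
	const uint32_t event_mask[2] = { 0x1000d9fe, 0x0000b00c };

	/* HCI_EV_CMD_COMPLETE is 0x0e: bit 14 of word 0 is set, so an
	 * unprivileged raw socket may receive Command Complete events.
	 */
	printf("cmd complete allowed: %d\n", test_bit32(0x0e, event_mask));
	/* HCI_EV_CMD_STATUS is 0x0f */
	printf("cmd status allowed:   %d\n", test_bit32(0x0f, event_mask));
	return 0;
}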
 148
 149static struct bt_sock_list hci_sk_list = {
 150	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 151};
 152
 153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 154{
 155	struct hci_filter *flt;
 156	int flt_type, flt_event;
 157
 158	/* Apply filter */
 159	flt = &hci_pi(sk)->filter;
 160
 161	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 162
 163	if (!test_bit(flt_type, &flt->type_mask))
 164		return true;
 165
 166	/* Extra filter for event packets only */
 167	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 168		return false;
 169
 170	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 171
 172	if (!hci_test_bit(flt_event, &flt->event_mask))
 173		return true;
 174
 175	/* Check filter only when opcode is set */
 176	if (!flt->opcode)
 177		return false;
 178
 179	if (flt_event == HCI_EV_CMD_COMPLETE &&
 180	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 181		return true;
 182
 183	if (flt_event == HCI_EV_CMD_STATUS &&
 184	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 185		return true;
 186
 187	return false;
 188}
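
/* A userspace sketch of the socket filter that is_filtered_packet()
 * evaluates above, using BlueZ's <bluetooth/hci_lib.h> helpers (assumed
 * installed): accept only HCI event packets, and of those only
 * Command Complete.
 */
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hci_filter flt;
	int fd = hci_open_dev(0);	/* raw channel socket bound to hci0 */

	if (fd < 0) {
		perror("hci_open_dev");
		return 1;
	}

	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);	/* -> flt.type_mask */
	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);	/* -> flt.event_mask */

	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
		perror("setsockopt(HCI_FILTER)");

	hci_close_dev(fd);
	return 0;
}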
 189
 190/* Send frame to RAW socket */
 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 192{
 193	struct sock *sk;
 194	struct sk_buff *skb_copy = NULL;
 195
 196	BT_DBG("hdev %p len %d", hdev, skb->len);
 197
 198	read_lock(&hci_sk_list.lock);
 199
 200	sk_for_each(sk, &hci_sk_list.head) {
 201		struct sk_buff *nskb;
 202
 203		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 204			continue;
 205
 206		/* Don't send frame to the socket it came from */
 207		if (skb->sk == sk)
 208			continue;
 209
 210		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 211			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 212			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 213			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 214			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 215				continue;
 216			if (is_filtered_packet(sk, skb))
 217				continue;
 218		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 219			if (!bt_cb(skb)->incoming)
 220				continue;
 221			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 222			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 224				continue;
 225		} else {
 226			/* Don't send frame to other channel types */
 227			continue;
 228		}
 229
 230		if (!skb_copy) {
 231			/* Create a private copy with headroom */
 232			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 233			if (!skb_copy)
 234				continue;
 235
 236			/* Put type byte before the data */
 237			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 238		}
 239
 240		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 241		if (!nskb)
 242			continue;
 243
 244		if (sock_queue_rcv_skb(sk, nskb))
 245			kfree_skb(nskb);
 246	}
 247
 248	read_unlock(&hci_sk_list.lock);
 249
 250	kfree_skb(skb_copy);
 251}
 252
 253/* Send frame to sockets with specific channel */
 254void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 255			 int flag, struct sock *skip_sk)
 256{
 257	struct sock *sk;
 258
 259	BT_DBG("channel %u len %d", channel, skb->len);
 260
 261	read_lock(&hci_sk_list.lock);
 262
 263	sk_for_each(sk, &hci_sk_list.head) {
 264		struct sk_buff *nskb;
 265
 266		/* Ignore socket without the flag set */
 267		if (!hci_sock_test_flag(sk, flag))
 268			continue;
 269
 270		/* Skip the original socket */
 271		if (sk == skip_sk)
 272			continue;
 273
 274		if (sk->sk_state != BT_BOUND)
 275			continue;
 276
 277		if (hci_pi(sk)->channel != channel)
 278			continue;
 279
 280		nskb = skb_clone(skb, GFP_ATOMIC);
 281		if (!nskb)
 282			continue;
 283
 284		if (sock_queue_rcv_skb(sk, nskb))
 285			kfree_skb(nskb);
 286	}
 287
 288	read_unlock(&hci_sk_list.lock);
 289}
 290
 291/* Send frame to monitor socket */
 292void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 293{
 294	struct sk_buff *skb_copy = NULL;
 295	struct hci_mon_hdr *hdr;
 296	__le16 opcode;
 297
 298	if (!atomic_read(&monitor_promisc))
 299		return;
 300
 301	BT_DBG("hdev %p len %d", hdev, skb->len);
 302
 303	switch (hci_skb_pkt_type(skb)) {
 304	case HCI_COMMAND_PKT:
 305		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 306		break;
 307	case HCI_EVENT_PKT:
 308		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 309		break;
 310	case HCI_ACLDATA_PKT:
 311		if (bt_cb(skb)->incoming)
 312			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 313		else
 314			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 315		break;
 316	case HCI_SCODATA_PKT:
 317		if (bt_cb(skb)->incoming)
 318			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 319		else
 320			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 321		break;
 322	case HCI_DIAG_PKT:
 323		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 324		break;
 325	default:
 326		return;
 327	}
 328
 329	/* Create a private copy with headroom */
 330	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 331	if (!skb_copy)
 332		return;
 333
 334	/* Put header before the data */
 335	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 336	hdr->opcode = opcode;
 337	hdr->index = cpu_to_le16(hdev->id);
 338	hdr->len = cpu_to_le16(skb->len);
 339
 340	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 341			    HCI_SOCK_TRUSTED, NULL);
 342	kfree_skb(skb_copy);
 343}
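
/* A btmon-style sketch of the monitor channel that is fed above: bind
 * HCI_CHANNEL_MONITOR (requires CAP_NET_RAW) and read hci_mon_hdr-framed
 * records. The header layout mirrors net/bluetooth/hci_mon.h (all fields
 * little endian) and the constant values are assumptions taken from the
 * kernel and BlueZ headers.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI		1
#define HCI_DEV_NONE		0xffff
#define HCI_CHANNEL_MONITOR	2

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

struct mon_hdr {			/* mirrors struct hci_mon_hdr */
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family	= AF_BLUETOOTH,
		.hci_dev	= HCI_DEV_NONE,
		.hci_channel	= HCI_CHANNEL_MONITOR,
	};
	unsigned char buf[4096];
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");	/* EPERM without CAP_NET_RAW */
		return 1;
	}

	/* Each read() returns one monitor record: header plus payload */
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));
		struct mon_hdr *hdr = (struct mon_hdr *)buf;

		if (n < (ssize_t)sizeof(*hdr))
			break;
		printf("opcode %u index %u len %u\n",
		       hdr->opcode, hdr->index, hdr->len);
	}

	close(fd);
	return 0;
}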
 344
 345void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 346				 void *data, u16 data_len, ktime_t tstamp,
 347				 int flag, struct sock *skip_sk)
 348{
 349	struct sock *sk;
 350	__le16 index;
 351
 352	if (hdev)
 353		index = cpu_to_le16(hdev->id);
 354	else
 355		index = cpu_to_le16(MGMT_INDEX_NONE);
 356
 357	read_lock(&hci_sk_list.lock);
 358
 359	sk_for_each(sk, &hci_sk_list.head) {
 360		struct hci_mon_hdr *hdr;
 361		struct sk_buff *skb;
 362
 363		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 364			continue;
 365
  366		/* Ignore sockets without the flag set */
 367		if (!hci_sock_test_flag(sk, flag))
 368			continue;
 369
 370		/* Skip the original socket */
 371		if (sk == skip_sk)
 372			continue;
 373
 374		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 375		if (!skb)
 376			continue;
 377
 378		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 379		put_unaligned_le16(event, skb_put(skb, 2));
 380
 381		if (data)
 382			memcpy(skb_put(skb, data_len), data, data_len);
 383
 384		skb->tstamp = tstamp;
 385
 386		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 387		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 388		hdr->index = index;
 389		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 390
 391		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 392				    HCI_SOCK_TRUSTED, NULL);
 393		kfree_skb(skb);
 394	}
 395
 396	read_unlock(&hci_sk_list.lock);
 397}
 398
 399static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 400{
 401	struct hci_mon_hdr *hdr;
 402	struct hci_mon_new_index *ni;
 403	struct hci_mon_index_info *ii;
 404	struct sk_buff *skb;
 405	__le16 opcode;
 406
 407	switch (event) {
 408	case HCI_DEV_REG:
 409		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 410		if (!skb)
 411			return NULL;
 412
 413		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 414		ni->type = hdev->dev_type;
 415		ni->bus = hdev->bus;
 416		bacpy(&ni->bdaddr, &hdev->bdaddr);
 417		memcpy(ni->name, hdev->name, 8);
 418
 419		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 420		break;
 421
 422	case HCI_DEV_UNREG:
 423		skb = bt_skb_alloc(0, GFP_ATOMIC);
 424		if (!skb)
 425			return NULL;
 426
 427		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 428		break;
 429
 430	case HCI_DEV_SETUP:
 431		if (hdev->manufacturer == 0xffff)
 432			return NULL;
 433
 434		/* fall through */
 435
 436	case HCI_DEV_UP:
 437		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 438		if (!skb)
 439			return NULL;
 440
 441		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 442		bacpy(&ii->bdaddr, &hdev->bdaddr);
 443		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 444
 445		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 446		break;
 447
 448	case HCI_DEV_OPEN:
 449		skb = bt_skb_alloc(0, GFP_ATOMIC);
 450		if (!skb)
 451			return NULL;
 452
 453		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 454		break;
 455
 456	case HCI_DEV_CLOSE:
 457		skb = bt_skb_alloc(0, GFP_ATOMIC);
 458		if (!skb)
 459			return NULL;
 460
 461		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 462		break;
 463
 464	default:
 465		return NULL;
 466	}
 467
 468	__net_timestamp(skb);
 469
 470	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 471	hdr->opcode = opcode;
 472	hdr->index = cpu_to_le16(hdev->id);
 473	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 474
 475	return skb;
 476}
 477
 478static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 479{
 480	struct hci_mon_hdr *hdr;
 481	struct sk_buff *skb;
 482	u16 format;
 483	u8 ver[3];
 484	u32 flags;
 485
 486	/* No message needed when cookie is not present */
 487	if (!hci_pi(sk)->cookie)
 488		return NULL;
 489
 490	switch (hci_pi(sk)->channel) {
 491	case HCI_CHANNEL_RAW:
 492		format = 0x0000;
 493		ver[0] = BT_SUBSYS_VERSION;
 494		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 495		break;
 496	case HCI_CHANNEL_USER:
 497		format = 0x0001;
 498		ver[0] = BT_SUBSYS_VERSION;
 499		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 500		break;
 501	case HCI_CHANNEL_CONTROL:
 502		format = 0x0002;
 503		mgmt_fill_version_info(ver);
 504		break;
 505	default:
 506		/* No message for unsupported format */
 507		return NULL;
 508	}
 509
  510	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 511	if (!skb)
 512		return NULL;
 513
 514	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 515
 516	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 517	put_unaligned_le16(format, skb_put(skb, 2));
 518	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
 519	put_unaligned_le32(flags, skb_put(skb, 4));
 520	*skb_put(skb, 1) = TASK_COMM_LEN;
 521	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);
 522
 523	__net_timestamp(skb);
 524
 525	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 526	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 527	if (hci_pi(sk)->hdev)
 528		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 529	else
 530		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 531	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 532
 533	return skb;
 534}
 535
 536static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 537{
 538	struct hci_mon_hdr *hdr;
 539	struct sk_buff *skb;
 540
 541	/* No message needed when cookie is not present */
 542	if (!hci_pi(sk)->cookie)
 543		return NULL;
 544
 545	switch (hci_pi(sk)->channel) {
 546	case HCI_CHANNEL_RAW:
 547	case HCI_CHANNEL_USER:
 548	case HCI_CHANNEL_CONTROL:
 549		break;
 550	default:
 551		/* No message for unsupported format */
 552		return NULL;
 553	}
 554
 555	skb = bt_skb_alloc(4, GFP_ATOMIC);
 556	if (!skb)
 557		return NULL;
 558
 559	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 560
 561	__net_timestamp(skb);
 562
 563	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 564	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 565	if (hci_pi(sk)->hdev)
 566		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 567	else
 568		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 569	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 570
 571	return skb;
 572}
 573
 574static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 575						   u16 opcode, u16 len,
 576						   const void *buf)
 577{
 578	struct hci_mon_hdr *hdr;
 579	struct sk_buff *skb;
 580
 581	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 582	if (!skb)
 583		return NULL;
 584
 585	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 586	put_unaligned_le16(opcode, skb_put(skb, 2));
 587
 588	if (buf)
 589		memcpy(skb_put(skb, len), buf, len);
 590
 591	__net_timestamp(skb);
 592
 593	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 594	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 595	hdr->index = cpu_to_le16(index);
 596	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 597
 598	return skb;
 599}
 600
 601static void __printf(2, 3)
 602send_monitor_note(struct sock *sk, const char *fmt, ...)
 603{
 604	size_t len;
 605	struct hci_mon_hdr *hdr;
 606	struct sk_buff *skb;
 607	va_list args;
 608
 609	va_start(args, fmt);
 610	len = vsnprintf(NULL, 0, fmt, args);
 611	va_end(args);
 612
 613	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 614	if (!skb)
 615		return;
 616
 617	va_start(args, fmt);
 618	vsprintf(skb_put(skb, len), fmt, args);
 619	*skb_put(skb, 1) = 0;
 620	va_end(args);
 621
 622	__net_timestamp(skb);
 623
 624	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 625	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 626	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 627	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 628
 629	if (sock_queue_rcv_skb(sk, skb))
 630		kfree_skb(skb);
 631}
 632
 633static void send_monitor_replay(struct sock *sk)
 634{
 635	struct hci_dev *hdev;
 636
 637	read_lock(&hci_dev_list_lock);
 638
 639	list_for_each_entry(hdev, &hci_dev_list, list) {
 640		struct sk_buff *skb;
 641
 642		skb = create_monitor_event(hdev, HCI_DEV_REG);
 643		if (!skb)
 644			continue;
 645
 646		if (sock_queue_rcv_skb(sk, skb))
 647			kfree_skb(skb);
 648
 649		if (!test_bit(HCI_RUNNING, &hdev->flags))
 650			continue;
 651
 652		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 653		if (!skb)
 654			continue;
 655
 656		if (sock_queue_rcv_skb(sk, skb))
 657			kfree_skb(skb);
 658
 659		if (test_bit(HCI_UP, &hdev->flags))
 660			skb = create_monitor_event(hdev, HCI_DEV_UP);
 661		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 662			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 663		else
 664			skb = NULL;
 665
 666		if (skb) {
 667			if (sock_queue_rcv_skb(sk, skb))
 668				kfree_skb(skb);
 669		}
 670	}
 671
 672	read_unlock(&hci_dev_list_lock);
 673}
 674
 675static void send_monitor_control_replay(struct sock *mon_sk)
 676{
 677	struct sock *sk;
 678
 679	read_lock(&hci_sk_list.lock);
 680
 681	sk_for_each(sk, &hci_sk_list.head) {
 682		struct sk_buff *skb;
 683
 684		skb = create_monitor_ctrl_open(sk);
 685		if (!skb)
 686			continue;
 687
 688		if (sock_queue_rcv_skb(mon_sk, skb))
 689			kfree_skb(skb);
 690	}
 691
 692	read_unlock(&hci_sk_list.lock);
 693}
 694
 695/* Generate internal stack event */
 696static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 697{
 698	struct hci_event_hdr *hdr;
 699	struct hci_ev_stack_internal *ev;
 700	struct sk_buff *skb;
 701
 702	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 703	if (!skb)
 704		return;
 705
 706	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 707	hdr->evt  = HCI_EV_STACK_INTERNAL;
 708	hdr->plen = sizeof(*ev) + dlen;
 709
 710	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 711	ev->type = type;
 712	memcpy(ev->data, data, dlen);
 713
 714	bt_cb(skb)->incoming = 1;
 715	__net_timestamp(skb);
 716
 717	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 718	hci_send_to_sock(hdev, skb);
 719	kfree_skb(skb);
 720}
 721
 722void hci_sock_dev_event(struct hci_dev *hdev, int event)
 723{
 724	BT_DBG("hdev %s event %d", hdev->name, event);
 725
 726	if (atomic_read(&monitor_promisc)) {
 727		struct sk_buff *skb;
 728
 729		/* Send event to monitor */
 730		skb = create_monitor_event(hdev, event);
 731		if (skb) {
 732			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 733					    HCI_SOCK_TRUSTED, NULL);
 734			kfree_skb(skb);
 735		}
 736	}
 737
 738	if (event <= HCI_DEV_DOWN) {
 739		struct hci_ev_si_device ev;
 740
 741		/* Send event to sockets */
 742		ev.event  = event;
 743		ev.dev_id = hdev->id;
 744		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 745	}
 746
 747	if (event == HCI_DEV_UNREG) {
 748		struct sock *sk;
 749
 750		/* Detach sockets from device */
 751		read_lock(&hci_sk_list.lock);
 752		sk_for_each(sk, &hci_sk_list.head) {
 753			bh_lock_sock_nested(sk);
 754			if (hci_pi(sk)->hdev == hdev) {
 755				hci_pi(sk)->hdev = NULL;
 756				sk->sk_err = EPIPE;
 757				sk->sk_state = BT_OPEN;
 758				sk->sk_state_change(sk);
 759
 760				hci_dev_put(hdev);
 761			}
 762			bh_unlock_sock(sk);
 763		}
 764		read_unlock(&hci_sk_list.lock);
 765	}
 766}
 767
 768static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 769{
 770	struct hci_mgmt_chan *c;
 771
 772	list_for_each_entry(c, &mgmt_chan_list, list) {
 773		if (c->channel == channel)
 774			return c;
 775	}
 776
 777	return NULL;
 778}
 779
 780static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 781{
 782	struct hci_mgmt_chan *c;
 783
 784	mutex_lock(&mgmt_chan_list_lock);
 785	c = __hci_mgmt_chan_find(channel);
 786	mutex_unlock(&mgmt_chan_list_lock);
 787
 788	return c;
 789}
 790
 791int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 792{
 793	if (c->channel < HCI_CHANNEL_CONTROL)
 794		return -EINVAL;
 795
 796	mutex_lock(&mgmt_chan_list_lock);
 797	if (__hci_mgmt_chan_find(c->channel)) {
 798		mutex_unlock(&mgmt_chan_list_lock);
 799		return -EALREADY;
 800	}
 801
 802	list_add_tail(&c->list, &mgmt_chan_list);
 803
 804	mutex_unlock(&mgmt_chan_list_lock);
 805
 806	return 0;
 807}
 808EXPORT_SYMBOL(hci_mgmt_chan_register);
 809
 810void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 811{
 812	mutex_lock(&mgmt_chan_list_lock);
 813	list_del(&c->list);
 814	mutex_unlock(&mgmt_chan_list_lock);
 815}
 816EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 817
 818static int hci_sock_release(struct socket *sock)
 819{
 820	struct sock *sk = sock->sk;
 821	struct hci_dev *hdev;
 822	struct sk_buff *skb;
 823
 824	BT_DBG("sock %p sk %p", sock, sk);
 825
 826	if (!sk)
 827		return 0;
 828
 829	hdev = hci_pi(sk)->hdev;
 830
 831	switch (hci_pi(sk)->channel) {
 832	case HCI_CHANNEL_MONITOR:
 833		atomic_dec(&monitor_promisc);
 834		break;
 835	case HCI_CHANNEL_RAW:
 836	case HCI_CHANNEL_USER:
 837	case HCI_CHANNEL_CONTROL:
 838		/* Send event to monitor */
 839		skb = create_monitor_ctrl_close(sk);
 840		if (skb) {
 841			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 842					    HCI_SOCK_TRUSTED, NULL);
 843			kfree_skb(skb);
 844		}
 845
 846		hci_sock_free_cookie(sk);
 847		break;
 848	}
 849
 850	bt_sock_unlink(&hci_sk_list, sk);
 851
 852	if (hdev) {
 853		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
  854			/* When releasing a user channel's exclusive access,
  855			 * call hci_dev_do_close directly instead of calling
  856			 * hci_dev_close to ensure the exclusive access will
  857			 * be released and the controller brought back down.
  858			 *
  859			 * The checking of HCI_AUTO_OFF is not needed in this
  860			 * case since it will have been cleared already when
  861			 * opening the user channel.
  862			 */
 863			hci_dev_do_close(hdev);
 864			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 865			mgmt_index_added(hdev);
 866		}
 867
 868		atomic_dec(&hdev->promisc);
 869		hci_dev_put(hdev);
 870	}
 871
 872	sock_orphan(sk);
 873
 874	skb_queue_purge(&sk->sk_receive_queue);
 875	skb_queue_purge(&sk->sk_write_queue);
 876
 877	sock_put(sk);
 878	return 0;
 879}
 880
 881static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 882{
 883	bdaddr_t bdaddr;
 884	int err;
 885
 886	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 887		return -EFAULT;
 888
 889	hci_dev_lock(hdev);
 890
 891	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 892
 893	hci_dev_unlock(hdev);
 894
 895	return err;
 896}
 897
 898static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 899{
 900	bdaddr_t bdaddr;
 901	int err;
 902
 903	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 904		return -EFAULT;
 905
 906	hci_dev_lock(hdev);
 907
 908	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 909
 910	hci_dev_unlock(hdev);
 911
 912	return err;
 913}
 914
 915/* Ioctls that require bound socket */
 916static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 917				unsigned long arg)
 918{
 919	struct hci_dev *hdev = hci_pi(sk)->hdev;
 920
 921	if (!hdev)
 922		return -EBADFD;
 923
 924	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 925		return -EBUSY;
 926
 927	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 928		return -EOPNOTSUPP;
 929
 930	if (hdev->dev_type != HCI_PRIMARY)
 931		return -EOPNOTSUPP;
 932
 933	switch (cmd) {
 934	case HCISETRAW:
 935		if (!capable(CAP_NET_ADMIN))
 936			return -EPERM;
 937		return -EOPNOTSUPP;
 938
 939	case HCIGETCONNINFO:
 940		return hci_get_conn_info(hdev, (void __user *)arg);
 941
 942	case HCIGETAUTHINFO:
 943		return hci_get_auth_info(hdev, (void __user *)arg);
 944
 945	case HCIBLOCKADDR:
 946		if (!capable(CAP_NET_ADMIN))
 947			return -EPERM;
 948		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 949
 950	case HCIUNBLOCKADDR:
 951		if (!capable(CAP_NET_ADMIN))
 952			return -EPERM;
 953		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 954	}
 955
 956	return -ENOIOCTLCMD;
 957}
 958
 959static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 960			  unsigned long arg)
 961{
 962	void __user *argp = (void __user *)arg;
 963	struct sock *sk = sock->sk;
 964	int err;
 965
 966	BT_DBG("cmd %x arg %lx", cmd, arg);
 967
 968	lock_sock(sk);
 969
 970	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 971		err = -EBADFD;
 972		goto done;
 973	}
 974
  975	/* When calling an ioctl on an unbound raw socket, ensure that
  976	 * the monitor gets informed. Ensure that the resulting event is
  977	 * only sent once by checking whether the cookie exists. The
  978	 * socket cookie will only ever be generated once for the
  979	 * lifetime of a given socket.
  980	 */
 981	if (hci_sock_gen_cookie(sk)) {
 982		struct sk_buff *skb;
 983
 984		if (capable(CAP_NET_ADMIN))
 985			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 986
 987		/* Send event to monitor */
 988		skb = create_monitor_ctrl_open(sk);
 989		if (skb) {
 990			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 991					    HCI_SOCK_TRUSTED, NULL);
 992			kfree_skb(skb);
 993		}
 994	}
 995
 996	release_sock(sk);
 997
 998	switch (cmd) {
 999	case HCIGETDEVLIST:
1000		return hci_get_dev_list(argp);
1001
1002	case HCIGETDEVINFO:
1003		return hci_get_dev_info(argp);
1004
1005	case HCIGETCONNLIST:
1006		return hci_get_conn_list(argp);
1007
1008	case HCIDEVUP:
1009		if (!capable(CAP_NET_ADMIN))
1010			return -EPERM;
1011		return hci_dev_open(arg);
1012
1013	case HCIDEVDOWN:
1014		if (!capable(CAP_NET_ADMIN))
1015			return -EPERM;
1016		return hci_dev_close(arg);
1017
1018	case HCIDEVRESET:
1019		if (!capable(CAP_NET_ADMIN))
1020			return -EPERM;
1021		return hci_dev_reset(arg);
1022
1023	case HCIDEVRESTAT:
1024		if (!capable(CAP_NET_ADMIN))
1025			return -EPERM;
1026		return hci_dev_reset_stat(arg);
1027
1028	case HCISETSCAN:
1029	case HCISETAUTH:
1030	case HCISETENCRYPT:
1031	case HCISETPTYPE:
1032	case HCISETLINKPOL:
1033	case HCISETLINKMODE:
1034	case HCISETACLMTU:
1035	case HCISETSCOMTU:
1036		if (!capable(CAP_NET_ADMIN))
1037			return -EPERM;
1038		return hci_dev_cmd(cmd, argp);
1039
1040	case HCIINQUIRY:
1041		return hci_inquiry(argp);
1042	}
1043
1044	lock_sock(sk);
1045
1046	err = hci_sock_bound_ioctl(sk, cmd, arg);
1047
1048done:
1049	release_sock(sk);
1050	return err;
1051}
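
/* A userspace sketch of the unbound-ioctl path above: HCIGETDEVLIST
 * works on a freshly created raw HCI socket, and the call also triggers
 * the one-time monitor open notification via hci_sock_gen_cookie().
 * Uses the definitions from BlueZ's <bluetooth/hci.h> (assumed
 * installed).
 */
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hci_dev_list_req *dl;
	int i, fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0)
		return 1;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return 1;
	dl->dev_num = HCI_MAX_DEV;

	/* On return, dev_num holds the number of registered controllers */
	if (ioctl(fd, HCIGETDEVLIST, dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u\n", dl->dev_req[i].dev_id);

	free(dl);
	close(fd);
	return 0;
}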
1052
1053static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1054			 int addr_len)
1055{
1056	struct sockaddr_hci haddr;
1057	struct sock *sk = sock->sk;
1058	struct hci_dev *hdev = NULL;
1059	struct sk_buff *skb;
1060	int len, err = 0;
1061
1062	BT_DBG("sock %p sk %p", sock, sk);
1063
1064	if (!addr)
1065		return -EINVAL;
1066
1067	memset(&haddr, 0, sizeof(haddr));
1068	len = min_t(unsigned int, sizeof(haddr), addr_len);
1069	memcpy(&haddr, addr, len);
1070
1071	if (haddr.hci_family != AF_BLUETOOTH)
1072		return -EINVAL;
1073
1074	lock_sock(sk);
1075
1076	if (sk->sk_state == BT_BOUND) {
1077		err = -EALREADY;
1078		goto done;
1079	}
1080
1081	switch (haddr.hci_channel) {
1082	case HCI_CHANNEL_RAW:
1083		if (hci_pi(sk)->hdev) {
1084			err = -EALREADY;
1085			goto done;
1086		}
1087
1088		if (haddr.hci_dev != HCI_DEV_NONE) {
1089			hdev = hci_dev_get(haddr.hci_dev);
1090			if (!hdev) {
1091				err = -ENODEV;
1092				goto done;
1093			}
1094
1095			atomic_inc(&hdev->promisc);
1096		}
1097
1098		hci_pi(sk)->channel = haddr.hci_channel;
1099
1100		if (!hci_sock_gen_cookie(sk)) {
 1101		/* In the case when a cookie has already been assigned, an
 1102		 * ioctl has already been issued against an unbound socket
 1103		 * and with that triggered an open notification. Send a
 1104		 * close notification first to allow the state transition
 1105		 * to bound.
 1106		 */
1107			skb = create_monitor_ctrl_close(sk);
1108			if (skb) {
1109				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1110						    HCI_SOCK_TRUSTED, NULL);
1111				kfree_skb(skb);
1112			}
1113		}
1114
1115		if (capable(CAP_NET_ADMIN))
1116			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1117
1118		hci_pi(sk)->hdev = hdev;
1119
1120		/* Send event to monitor */
1121		skb = create_monitor_ctrl_open(sk);
1122		if (skb) {
1123			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1124					    HCI_SOCK_TRUSTED, NULL);
1125			kfree_skb(skb);
1126		}
1127		break;
1128
1129	case HCI_CHANNEL_USER:
1130		if (hci_pi(sk)->hdev) {
1131			err = -EALREADY;
1132			goto done;
1133		}
1134
1135		if (haddr.hci_dev == HCI_DEV_NONE) {
1136			err = -EINVAL;
1137			goto done;
1138		}
1139
1140		if (!capable(CAP_NET_ADMIN)) {
1141			err = -EPERM;
1142			goto done;
1143		}
1144
1145		hdev = hci_dev_get(haddr.hci_dev);
1146		if (!hdev) {
1147			err = -ENODEV;
1148			goto done;
1149		}
1150
1151		if (test_bit(HCI_INIT, &hdev->flags) ||
1152		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1153		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1154		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1155		     test_bit(HCI_UP, &hdev->flags))) {
1156			err = -EBUSY;
1157			hci_dev_put(hdev);
1158			goto done;
1159		}
1160
1161		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1162			err = -EUSERS;
1163			hci_dev_put(hdev);
1164			goto done;
1165		}
1166
1167		mgmt_index_removed(hdev);
1168
1169		err = hci_dev_open(hdev->id);
1170		if (err) {
1171			if (err == -EALREADY) {
 1172			/* In case the transport is already up and
 1173			 * running, clear the error here.
 1174			 *
 1175			 * This can happen when opening a user
 1176			 * channel while the HCI_AUTO_OFF grace
 1177			 * period is still active.
 1178			 */
1179				err = 0;
1180			} else {
1181				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1182				mgmt_index_added(hdev);
1183				hci_dev_put(hdev);
1184				goto done;
1185			}
1186		}
1187
1188		hci_pi(sk)->channel = haddr.hci_channel;
1189
1190		if (!hci_sock_gen_cookie(sk)) {
1191			/* In the case when a cookie has already been assigned,
1192			 * this socket will transition from a raw socket into
 1193		 * a user channel socket. For a clean transition, send
1194			 * the close notification first.
1195			 */
1196			skb = create_monitor_ctrl_close(sk);
1197			if (skb) {
1198				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1199						    HCI_SOCK_TRUSTED, NULL);
1200				kfree_skb(skb);
1201			}
1202		}
1203
1204		/* The user channel is restricted to CAP_NET_ADMIN
1205		 * capabilities and with that implicitly trusted.
1206		 */
1207		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1208
1209		hci_pi(sk)->hdev = hdev;
1210
1211		/* Send event to monitor */
1212		skb = create_monitor_ctrl_open(sk);
1213		if (skb) {
1214			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1215					    HCI_SOCK_TRUSTED, NULL);
1216			kfree_skb(skb);
1217		}
1218
1219		atomic_inc(&hdev->promisc);
1220		break;
1221
1222	case HCI_CHANNEL_MONITOR:
1223		if (haddr.hci_dev != HCI_DEV_NONE) {
1224			err = -EINVAL;
1225			goto done;
1226		}
1227
1228		if (!capable(CAP_NET_RAW)) {
1229			err = -EPERM;
1230			goto done;
1231		}
1232
1233		hci_pi(sk)->channel = haddr.hci_channel;
1234
1235		/* The monitor interface is restricted to CAP_NET_RAW
1236		 * capabilities and with that implicitly trusted.
1237		 */
1238		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1239
1240		send_monitor_note(sk, "Linux version %s (%s)",
1241				  init_utsname()->release,
1242				  init_utsname()->machine);
1243		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1244				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1245		send_monitor_replay(sk);
1246		send_monitor_control_replay(sk);
1247
1248		atomic_inc(&monitor_promisc);
1249		break;
1250
1251	case HCI_CHANNEL_LOGGING:
1252		if (haddr.hci_dev != HCI_DEV_NONE) {
1253			err = -EINVAL;
1254			goto done;
1255		}
1256
1257		if (!capable(CAP_NET_ADMIN)) {
1258			err = -EPERM;
1259			goto done;
1260		}
1261
1262		hci_pi(sk)->channel = haddr.hci_channel;
1263		break;
1264
1265	default:
1266		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1267			err = -EINVAL;
1268			goto done;
1269		}
1270
1271		if (haddr.hci_dev != HCI_DEV_NONE) {
1272			err = -EINVAL;
1273			goto done;
1274		}
1275
1276		/* Users with CAP_NET_ADMIN capabilities are allowed
1277		 * access to all management commands and events. For
1278		 * untrusted users the interface is restricted and
1279		 * also only untrusted events are sent.
1280		 */
1281		if (capable(CAP_NET_ADMIN))
1282			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1283
1284		hci_pi(sk)->channel = haddr.hci_channel;
1285
1286		/* At the moment the index and unconfigured index events
1287		 * are enabled unconditionally. Setting them on each
1288		 * socket when binding keeps this functionality. They
1289		 * however might be cleared later and then sending of these
1290		 * events will be disabled, but that is then intentional.
1291		 *
1292		 * This also enables generic events that are safe to be
1293		 * received by untrusted users. Example for such events
1294		 * are changes to settings, class of device, name etc.
1295		 */
1296		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1297			if (!hci_sock_gen_cookie(sk)) {
1298				/* In the case when a cookie has already been
 1299				 * assigned, this socket will transition from
 1300				 * a raw socket into a control socket. To
 1301				 * allow for a clean transition, send the
1302				 * close notification first.
1303				 */
1304				skb = create_monitor_ctrl_close(sk);
1305				if (skb) {
1306					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1307							    HCI_SOCK_TRUSTED, NULL);
1308					kfree_skb(skb);
1309				}
1310			}
1311
1312			/* Send event to monitor */
1313			skb = create_monitor_ctrl_open(sk);
1314			if (skb) {
1315				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1316						    HCI_SOCK_TRUSTED, NULL);
1317				kfree_skb(skb);
1318			}
1319
1320			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1321			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1322			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1323			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1324			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1325			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1326		}
1327		break;
1328	}
1329
1330	sk->sk_state = BT_BOUND;
1331
1332done:
1333	release_sock(sk);
1334	return err;
1335}
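
/* A minimal sketch of the HCI_CHANNEL_RAW arm of hci_sock_bind() above:
 * bind a raw socket to device hci0. The sockaddr_hci layout and the
 * constants follow the BlueZ headers and are assumptions here; without
 * CAP_NET_ADMIN the socket stays untrusted but the bind still succeeds.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI	1
#define HCI_CHANNEL_RAW	0

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family	= AF_BLUETOOTH,
		.hci_dev	= 0,			/* hci0 */
		.hci_channel	= HCI_CHANNEL_RAW,
	};
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");	/* ENODEV if hci0 does not exist */
		return 1;
	}

	close(fd);
	return 0;
}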
1336
1337static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1338			    int *addr_len, int peer)
1339{
1340	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1341	struct sock *sk = sock->sk;
1342	struct hci_dev *hdev;
1343	int err = 0;
1344
1345	BT_DBG("sock %p sk %p", sock, sk);
1346
1347	if (peer)
1348		return -EOPNOTSUPP;
1349
1350	lock_sock(sk);
1351
1352	hdev = hci_pi(sk)->hdev;
1353	if (!hdev) {
1354		err = -EBADFD;
1355		goto done;
1356	}
1357
1358	*addr_len = sizeof(*haddr);
1359	haddr->hci_family = AF_BLUETOOTH;
1360	haddr->hci_dev    = hdev->id;
 1361	haddr->hci_channel = hci_pi(sk)->channel;
1362
1363done:
1364	release_sock(sk);
1365	return err;
1366}
1367
1368static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1369			  struct sk_buff *skb)
1370{
1371	__u32 mask = hci_pi(sk)->cmsg_mask;
1372
1373	if (mask & HCI_CMSG_DIR) {
1374		int incoming = bt_cb(skb)->incoming;
1375		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1376			 &incoming);
1377	}
1378
1379	if (mask & HCI_CMSG_TSTAMP) {
1380#ifdef CONFIG_COMPAT
1381		struct compat_timeval ctv;
1382#endif
1383		struct timeval tv;
1384		void *data;
1385		int len;
1386
1387		skb_get_timestamp(skb, &tv);
1388
1389		data = &tv;
1390		len = sizeof(tv);
1391#ifdef CONFIG_COMPAT
1392		if (!COMPAT_USE_64BIT_TIME &&
1393		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1394			ctv.tv_sec = tv.tv_sec;
1395			ctv.tv_usec = tv.tv_usec;
1396			data = &ctv;
1397			len = sizeof(ctv);
1398		}
1399#endif
1400
1401		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1402	}
1403}
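
/* A sketch of consuming the control messages hci_sock_cmsg() emits
 * above: enable HCI_DATA_DIR on a raw socket bound to hci0, then pull
 * the direction flag out of the cmsg chain on recvmsg(). The constants
 * (SOL_HCI = 0, HCI_DATA_DIR = 1, HCI_CMSG_DIR = 1) follow the BlueZ
 * headers and are assumptions here.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI	1
#define HCI_CHANNEL_RAW	0
#define SOL_HCI		0
#define HCI_DATA_DIR	1
#define HCI_CMSG_DIR	1

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family	= AF_BLUETOOTH,
		.hci_dev	= 0,
		.hci_channel	= HCI_CHANNEL_RAW,
	};
	char data[512], ctrl[64];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	struct cmsghdr *c;
	int one = 1;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) > 0)
		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
			if (c->cmsg_level == SOL_HCI &&
			    c->cmsg_type == HCI_CMSG_DIR)
				printf("incoming=%d\n", *(int *)CMSG_DATA(c));

	close(fd);
	return 0;
}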
1404
1405static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1406			    size_t len, int flags)
1407{
1408	int noblock = flags & MSG_DONTWAIT;
1409	struct sock *sk = sock->sk;
1410	struct sk_buff *skb;
1411	int copied, err;
1412	unsigned int skblen;
1413
1414	BT_DBG("sock %p, sk %p", sock, sk);
1415
1416	if (flags & MSG_OOB)
1417		return -EOPNOTSUPP;
1418
1419	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1420		return -EOPNOTSUPP;
1421
1422	if (sk->sk_state == BT_CLOSED)
1423		return 0;
1424
1425	skb = skb_recv_datagram(sk, flags, noblock, &err);
1426	if (!skb)
1427		return err;
1428
1429	skblen = skb->len;
1430	copied = skb->len;
1431	if (len < copied) {
1432		msg->msg_flags |= MSG_TRUNC;
1433		copied = len;
1434	}
1435
1436	skb_reset_transport_header(skb);
1437	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1438
1439	switch (hci_pi(sk)->channel) {
1440	case HCI_CHANNEL_RAW:
1441		hci_sock_cmsg(sk, msg, skb);
1442		break;
1443	case HCI_CHANNEL_USER:
1444	case HCI_CHANNEL_MONITOR:
1445		sock_recv_timestamp(msg, sk, skb);
1446		break;
1447	default:
1448		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1449			sock_recv_timestamp(msg, sk, skb);
1450		break;
1451	}
1452
1453	skb_free_datagram(sk, skb);
1454
1455	if (flags & MSG_TRUNC)
1456		copied = skblen;
1457
1458	return err ? : copied;
1459}
1460
1461static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1462			struct msghdr *msg, size_t msglen)
1463{
1464	void *buf;
1465	u8 *cp;
1466	struct mgmt_hdr *hdr;
1467	u16 opcode, index, len;
1468	struct hci_dev *hdev = NULL;
1469	const struct hci_mgmt_handler *handler;
1470	bool var_len, no_hdev;
1471	int err;
1472
1473	BT_DBG("got %zu bytes", msglen);
1474
1475	if (msglen < sizeof(*hdr))
1476		return -EINVAL;
1477
1478	buf = kmalloc(msglen, GFP_KERNEL);
1479	if (!buf)
1480		return -ENOMEM;
1481
1482	if (memcpy_from_msg(buf, msg, msglen)) {
1483		err = -EFAULT;
1484		goto done;
1485	}
1486
1487	hdr = buf;
1488	opcode = __le16_to_cpu(hdr->opcode);
1489	index = __le16_to_cpu(hdr->index);
1490	len = __le16_to_cpu(hdr->len);
1491
1492	if (len != msglen - sizeof(*hdr)) {
1493		err = -EINVAL;
1494		goto done;
1495	}
1496
1497	if (chan->channel == HCI_CHANNEL_CONTROL) {
1498		struct sk_buff *skb;
1499
1500		/* Send event to monitor */
1501		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1502						  buf + sizeof(*hdr));
1503		if (skb) {
1504			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1505					    HCI_SOCK_TRUSTED, NULL);
1506			kfree_skb(skb);
1507		}
1508	}
1509
1510	if (opcode >= chan->handler_count ||
1511	    chan->handlers[opcode].func == NULL) {
1512		BT_DBG("Unknown op %u", opcode);
1513		err = mgmt_cmd_status(sk, index, opcode,
1514				      MGMT_STATUS_UNKNOWN_COMMAND);
1515		goto done;
1516	}
1517
1518	handler = &chan->handlers[opcode];
1519
1520	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1521	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1522		err = mgmt_cmd_status(sk, index, opcode,
1523				      MGMT_STATUS_PERMISSION_DENIED);
1524		goto done;
1525	}
1526
1527	if (index != MGMT_INDEX_NONE) {
1528		hdev = hci_dev_get(index);
1529		if (!hdev) {
1530			err = mgmt_cmd_status(sk, index, opcode,
1531					      MGMT_STATUS_INVALID_INDEX);
1532			goto done;
1533		}
1534
1535		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1536		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1537		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1538			err = mgmt_cmd_status(sk, index, opcode,
1539					      MGMT_STATUS_INVALID_INDEX);
1540			goto done;
1541		}
1542
1543		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1544		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1545			err = mgmt_cmd_status(sk, index, opcode,
1546					      MGMT_STATUS_INVALID_INDEX);
1547			goto done;
1548		}
1549	}
1550
1551	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1552	if (no_hdev != !hdev) {
1553		err = mgmt_cmd_status(sk, index, opcode,
1554				      MGMT_STATUS_INVALID_INDEX);
1555		goto done;
1556	}
1557
1558	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1559	if ((var_len && len < handler->data_len) ||
1560	    (!var_len && len != handler->data_len)) {
1561		err = mgmt_cmd_status(sk, index, opcode,
1562				      MGMT_STATUS_INVALID_PARAMS);
1563		goto done;
1564	}
1565
1566	if (hdev && chan->hdev_init)
1567		chan->hdev_init(sk, hdev);
1568
1569	cp = buf + sizeof(*hdr);
1570
1571	err = handler->func(sk, hdev, cp, len);
1572	if (err < 0)
1573		goto done;
1574
1575	err = msglen;
1576
1577done:
1578	if (hdev)
1579		hci_dev_put(hdev);
1580
1581	kfree(buf);
1582	return err;
1583}
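
/* A userspace sketch of the management framing hci_mgmt_cmd() parses
 * above: a 6-byte little-endian header (opcode, index, len) followed by
 * the parameters. This sends MGMT_OP_READ_VERSION (0x0001) with
 * MGMT_INDEX_NONE (0xffff) on the control channel; the opcode and index
 * values follow BlueZ's doc/mgmt-api.txt and are assumptions here.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI		1
#define HCI_DEV_NONE		0xffff
#define HCI_CHANNEL_CONTROL	3

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family	= AF_BLUETOOTH,
		.hci_dev	= HCI_DEV_NONE,
		.hci_channel	= HCI_CHANNEL_CONTROL,
	};
	/* opcode 0x0001, index 0xffff, param len 0 (all little endian) */
	unsigned char cmd[6] = { 0x01, 0x00, 0xff, 0xff, 0x00, 0x00 };
	unsigned char evt[32];
	ssize_t n;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	if (write(fd, cmd, sizeof(cmd)) != sizeof(cmd))
		return 1;

	/* Expect an MGMT_EV_CMD_COMPLETE carrying version and revision */
	n = read(fd, evt, sizeof(evt));
	printf("got %zd byte event\n", n);

	close(fd);
	return 0;
}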
1584
1585static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1586{
1587	struct hci_mon_hdr *hdr;
1588	struct sk_buff *skb;
1589	struct hci_dev *hdev;
1590	u16 index;
1591	int err;
1592
1593	/* The logging frame consists at minimum of the standard header,
1594	 * the priority byte, the ident length byte and at least one string
 1595	 * terminator NUL byte. Anything shorter is an invalid packet.
1596	 */
1597	if (len < sizeof(*hdr) + 3)
1598		return -EINVAL;
1599
1600	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1601	if (!skb)
1602		return err;
1603
1604	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1605		err = -EFAULT;
1606		goto drop;
1607	}
1608
1609	hdr = (void *)skb->data;
1610
1611	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1612		err = -EINVAL;
1613		goto drop;
1614	}
1615
1616	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1617		__u8 priority = skb->data[sizeof(*hdr)];
1618		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1619
 1620		/* Only priorities 0-7 are valid; any other value results
 1621		 * in an invalid packet.
 1622		 *
 1623		 * The priority byte is followed by an ident length byte and
 1624		 * the NUL terminated ident string. Check that the ident
 1625		 * length does not overflow the packet and also that the
 1626		 * ident string itself is NUL terminated. If the ident
 1627		 * length is zero, the length byte itself doubles as the
 1628		 * NUL terminator.
 1629		 *
 1630		 * The message follows the ident string (if present) and
 1631		 * must be NUL terminated. Otherwise it is not a valid packet.
 1632		 */
1633		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1634		    ident_len > len - sizeof(*hdr) - 3 ||
1635		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1636			err = -EINVAL;
1637			goto drop;
1638		}
1639	} else {
1640		err = -EINVAL;
1641		goto drop;
1642	}
1643
1644	index = __le16_to_cpu(hdr->index);
1645
1646	if (index != MGMT_INDEX_NONE) {
1647		hdev = hci_dev_get(index);
1648		if (!hdev) {
1649			err = -ENODEV;
1650			goto drop;
1651		}
1652	} else {
1653		hdev = NULL;
1654	}
1655
1656	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1657
1658	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1659	err = len;
1660
1661	if (hdev)
1662		hci_dev_put(hdev);
1663
1664drop:
1665	kfree_skb(skb);
1666	return err;
1667}
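
/* A sketch of a frame that passes the validation in hci_logging_frame()
 * above: the 6-byte monitor header with opcode 0x0000, then the priority
 * byte, the ident length (including its NUL), the NUL-terminated ident
 * and the NUL-terminated message. HCI_CHANNEL_LOGGING (4) follows the
 * kernel header, binding it requires CAP_NET_ADMIN, and index 0xffff
 * means "no controller"; all of these are assumptions here.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI		1
#define HCI_DEV_NONE		0xffff
#define HCI_CHANNEL_LOGGING	4

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family	= AF_BLUETOOTH,
		.hci_dev	= HCI_DEV_NONE,
		.hci_channel	= HCI_CHANNEL_LOGGING,
	};
	unsigned char frame[] = {
		0x00, 0x00,		/* opcode 0x0000 */
		0xff, 0xff,		/* index: no controller */
		0x09, 0x00,		/* payload length 9 */
		0x06,			/* priority: LOG_INFO */
		0x03,			/* ident length, incl. NUL */
		'e', 'x', 0x00,		/* ident */
		'h', 'i', 0x00,		/* message */
	};
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	if (write(fd, frame, sizeof(frame)) < 0)
		perror("write");

	close(fd);
	return 0;
}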
1668
1669static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1670			    size_t len)
1671{
1672	struct sock *sk = sock->sk;
1673	struct hci_mgmt_chan *chan;
1674	struct hci_dev *hdev;
1675	struct sk_buff *skb;
1676	int err;
1677
1678	BT_DBG("sock %p sk %p", sock, sk);
1679
1680	if (msg->msg_flags & MSG_OOB)
1681		return -EOPNOTSUPP;
1682
1683	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1684		return -EINVAL;
1685
1686	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1687		return -EINVAL;
1688
1689	lock_sock(sk);
1690
1691	switch (hci_pi(sk)->channel) {
1692	case HCI_CHANNEL_RAW:
1693	case HCI_CHANNEL_USER:
1694		break;
1695	case HCI_CHANNEL_MONITOR:
1696		err = -EOPNOTSUPP;
1697		goto done;
1698	case HCI_CHANNEL_LOGGING:
1699		err = hci_logging_frame(sk, msg, len);
1700		goto done;
1701	default:
1702		mutex_lock(&mgmt_chan_list_lock);
1703		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1704		if (chan)
1705			err = hci_mgmt_cmd(chan, sk, msg, len);
1706		else
1707			err = -EINVAL;
1708
1709		mutex_unlock(&mgmt_chan_list_lock);
1710		goto done;
1711	}
1712
1713	hdev = hci_pi(sk)->hdev;
1714	if (!hdev) {
1715		err = -EBADFD;
1716		goto done;
1717	}
1718
1719	if (!test_bit(HCI_UP, &hdev->flags)) {
1720		err = -ENETDOWN;
1721		goto done;
1722	}
1723
1724	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1725	if (!skb)
1726		goto done;
1727
1728	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1729		err = -EFAULT;
1730		goto drop;
1731	}
1732
1733	hci_skb_pkt_type(skb) = skb->data[0];
1734	skb_pull(skb, 1);
1735
1736	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1737		/* No permission check is needed for user channel
1738		 * since that gets enforced when binding the socket.
1739		 *
1740		 * However check that the packet type is valid.
1741		 */
1742		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1743		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1744		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1745			err = -EINVAL;
1746			goto drop;
1747		}
1748
1749		skb_queue_tail(&hdev->raw_q, skb);
1750		queue_work(hdev->workqueue, &hdev->tx_work);
1751	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1752		u16 opcode = get_unaligned_le16(skb->data);
1753		u16 ogf = hci_opcode_ogf(opcode);
1754		u16 ocf = hci_opcode_ocf(opcode);
1755
1756		if (((ogf > HCI_SFLT_MAX_OGF) ||
1757		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1758				   &hci_sec_filter.ocf_mask[ogf])) &&
1759		    !capable(CAP_NET_RAW)) {
1760			err = -EPERM;
1761			goto drop;
1762		}
1763
1764		/* Since the opcode has already been extracted here, store
1765		 * a copy of the value for later use by the drivers.
1766		 */
1767		hci_skb_opcode(skb) = opcode;
1768
1769		if (ogf == 0x3f) {
1770			skb_queue_tail(&hdev->raw_q, skb);
1771			queue_work(hdev->workqueue, &hdev->tx_work);
1772		} else {
1773			/* Stand-alone HCI commands must be flagged as
1774			 * single-command requests.
1775			 */
1776			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1777
1778			skb_queue_tail(&hdev->cmd_q, skb);
1779			queue_work(hdev->workqueue, &hdev->cmd_work);
1780		}
1781	} else {
1782		if (!capable(CAP_NET_RAW)) {
1783			err = -EPERM;
1784			goto drop;
1785		}
1786
1787		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1788		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1789			err = -EINVAL;
1790			goto drop;
1791		}
1792
1793		skb_queue_tail(&hdev->raw_q, skb);
1794		queue_work(hdev->workqueue, &hdev->tx_work);
1795	}
1796
1797	err = len;
1798
1799done:
1800	release_sock(sk);
1801	return err;
1802
1803drop:
1804	kfree_skb(skb);
1805	goto done;
1806}
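
/* A sketch of the HCI_COMMAND_PKT path in hci_sock_sendmsg() above: the
 * first byte selects the packet type, followed by the little-endian
 * opcode and the parameter length. This sends HCI Reset (OGF 0x03,
 * OCF 0x0003, opcode 0x0c03) on a raw socket bound to hci0; Reset is
 * not allowed by hci_sec_filter's OGF_HOST_CTL mask, so the caller
 * needs CAP_NET_RAW. The constant values follow the BlueZ headers and
 * are assumptions here.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI	1
#define HCI_CHANNEL_RAW	0

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family	= AF_BLUETOOTH,
		.hci_dev	= 0,		/* hci0 */
		.hci_channel	= HCI_CHANNEL_RAW,
	};
	/* type, opcode LSB, opcode MSB, parameter length */
	unsigned char pkt[4] = { 0x01, 0x03, 0x0c, 0x00 };
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	if (write(fd, pkt, sizeof(pkt)) < 0)
		perror("write");	/* EPERM without CAP_NET_RAW */

	close(fd);
	return 0;
}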
1807
1808static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1809			       char __user *optval, unsigned int len)
1810{
1811	struct hci_ufilter uf = { .opcode = 0 };
1812	struct sock *sk = sock->sk;
1813	int err = 0, opt = 0;
1814
1815	BT_DBG("sk %p, opt %d", sk, optname);
1816
1817	if (level != SOL_HCI)
1818		return -ENOPROTOOPT;
1819
1820	lock_sock(sk);
1821
1822	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1823		err = -EBADFD;
1824		goto done;
1825	}
1826
1827	switch (optname) {
1828	case HCI_DATA_DIR:
1829		if (get_user(opt, (int __user *)optval)) {
1830			err = -EFAULT;
1831			break;
1832		}
1833
1834		if (opt)
1835			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1836		else
1837			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1838		break;
1839
1840	case HCI_TIME_STAMP:
1841		if (get_user(opt, (int __user *)optval)) {
1842			err = -EFAULT;
1843			break;
1844		}
1845
1846		if (opt)
1847			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1848		else
1849			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1850		break;
1851
1852	case HCI_FILTER:
1853		{
1854			struct hci_filter *f = &hci_pi(sk)->filter;
1855
1856			uf.type_mask = f->type_mask;
1857			uf.opcode    = f->opcode;
1858			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1859			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1860		}
1861
1862		len = min_t(unsigned int, len, sizeof(uf));
1863		if (copy_from_user(&uf, optval, len)) {
1864			err = -EFAULT;
1865			break;
1866		}
1867
1868		if (!capable(CAP_NET_RAW)) {
1869			uf.type_mask &= hci_sec_filter.type_mask;
1870			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1871			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1872		}
1873
1874		{
1875			struct hci_filter *f = &hci_pi(sk)->filter;
1876
1877			f->type_mask = uf.type_mask;
1878			f->opcode    = uf.opcode;
1879			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1880			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1881		}
1882		break;
1883
1884	default:
1885		err = -ENOPROTOOPT;
1886		break;
1887	}
1888
1889done:
1890	release_sock(sk);
1891	return err;
1892}
1893
1894static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1895			       char __user *optval, int __user *optlen)
1896{
1897	struct hci_ufilter uf;
1898	struct sock *sk = sock->sk;
1899	int len, opt, err = 0;
1900
1901	BT_DBG("sk %p, opt %d", sk, optname);
1902
1903	if (level != SOL_HCI)
1904		return -ENOPROTOOPT;
1905
1906	if (get_user(len, optlen))
1907		return -EFAULT;
1908
1909	lock_sock(sk);
1910
1911	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1912		err = -EBADFD;
1913		goto done;
1914	}
1915
1916	switch (optname) {
1917	case HCI_DATA_DIR:
1918		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1919			opt = 1;
1920		else
1921			opt = 0;
1922
1923		if (put_user(opt, optval))
1924			err = -EFAULT;
1925		break;
1926
1927	case HCI_TIME_STAMP:
1928		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1929			opt = 1;
1930		else
1931			opt = 0;
1932
1933		if (put_user(opt, optval))
1934			err = -EFAULT;
1935		break;
1936
1937	case HCI_FILTER:
1938		{
1939			struct hci_filter *f = &hci_pi(sk)->filter;
1940
1941			memset(&uf, 0, sizeof(uf));
1942			uf.type_mask = f->type_mask;
1943			uf.opcode    = f->opcode;
1944			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1945			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1946		}
1947
1948		len = min_t(unsigned int, len, sizeof(uf));
1949		if (copy_to_user(optval, &uf, len))
1950			err = -EFAULT;
1951		break;
1952
1953	default:
1954		err = -ENOPROTOOPT;
1955		break;
1956	}
1957
1958done:
1959	release_sock(sk);
1960	return err;
1961}
1962
1963static const struct proto_ops hci_sock_ops = {
1964	.family		= PF_BLUETOOTH,
1965	.owner		= THIS_MODULE,
1966	.release	= hci_sock_release,
1967	.bind		= hci_sock_bind,
1968	.getname	= hci_sock_getname,
1969	.sendmsg	= hci_sock_sendmsg,
1970	.recvmsg	= hci_sock_recvmsg,
1971	.ioctl		= hci_sock_ioctl,
1972	.poll		= datagram_poll,
1973	.listen		= sock_no_listen,
1974	.shutdown	= sock_no_shutdown,
1975	.setsockopt	= hci_sock_setsockopt,
1976	.getsockopt	= hci_sock_getsockopt,
1977	.connect	= sock_no_connect,
1978	.socketpair	= sock_no_socketpair,
1979	.accept		= sock_no_accept,
1980	.mmap		= sock_no_mmap
1981};
1982
1983static struct proto hci_sk_proto = {
1984	.name		= "HCI",
1985	.owner		= THIS_MODULE,
1986	.obj_size	= sizeof(struct hci_pinfo)
1987};
1988
1989static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1990			   int kern)
1991{
1992	struct sock *sk;
1993
1994	BT_DBG("sock %p", sock);
1995
1996	if (sock->type != SOCK_RAW)
1997		return -ESOCKTNOSUPPORT;
1998
1999	sock->ops = &hci_sock_ops;
2000
2001	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2002	if (!sk)
2003		return -ENOMEM;
2004
2005	sock_init_data(sock, sk);
2006
2007	sock_reset_flag(sk, SOCK_ZAPPED);
2008
2009	sk->sk_protocol = protocol;
2010
2011	sock->state = SS_UNCONNECTED;
2012	sk->sk_state = BT_OPEN;
2013
2014	bt_sock_link(&hci_sk_list, sk);
2015	return 0;
2016}
2017
2018static const struct net_proto_family hci_sock_family_ops = {
2019	.family	= PF_BLUETOOTH,
2020	.owner	= THIS_MODULE,
2021	.create	= hci_sock_create,
2022};
2023
2024int __init hci_sock_init(void)
2025{
2026	int err;
2027
2028	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2029
2030	err = proto_register(&hci_sk_proto, 0);
2031	if (err < 0)
2032		return err;
2033
2034	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2035	if (err < 0) {
2036		BT_ERR("HCI socket registration failed");
2037		goto error;
2038	}
2039
2040	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2041	if (err < 0) {
2042		BT_ERR("Failed to create HCI proc file");
2043		bt_sock_unregister(BTPROTO_HCI);
2044		goto error;
2045	}
2046
2047	BT_INFO("HCI socket layer initialized");
2048
2049	return 0;
2050
2051error:
2052	proto_unregister(&hci_sk_proto);
2053	return err;
2054}
2055
2056void hci_sock_cleanup(void)
2057{
2058	bt_procfs_cleanup(&init_net, "hci");
2059	bt_sock_unregister(BTPROTO_HCI);
2060	proto_unregister(&hci_sk_proto);
2061}